ARM: do not generate BLX instructions on Cortex-M CPUs.

Particularly on MachO, we were generating "blx _dest" instructions for M-class
CPUs, even though the immediate form of BLX does not exist there. Such calls
happen to get fixed up by the linker into valid "bl _dest" instructions (which
is why such a serious issue has remained largely undetected), but we shouldn't
rely on that.
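As a sketch of the failure mode (the triple and symbol names here are
illustrative, not taken from this commit's tests): on a Thumb-only M-class
MachO target, a direct call to an external symbol could previously be selected
as "blx _dest", even though M-profile cores implement only the register form
of BLX, never the immediate form.

; Hypothetical reproducer, assuming thumbv7m-apple-ios parses as an
; M-class MachO triple; after this change the call must lower to BL.
; RUN: llc < %s -mtriple=thumbv7m-apple-ios | FileCheck %s
declare void @dest()

define void @caller() {
; CHECK-LABEL: _caller:
; CHECK: bl _dest
  call void @dest()
  ret void
}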

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@214959 91177308-0d34-0410-b5e6-96231b3b80d8
Tim Northover 2014-08-06 11:13:14 +00:00
parent 08828a979a
commit 2c0d42ac9a
7 changed files with 35 additions and 12 deletions

View File

@@ -1659,7 +1659,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
bool isStub = (isExt && Subtarget->isTargetMachO()) &&
getTargetMachine().getRelocationModel() != Reloc::Static;
-isARMFunc = !Subtarget->isThumb() || isStub;
+isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
// ARM call to a local ARM function is predicable.
isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
// tBX takes a register source operand.
@@ -1695,7 +1695,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
isDirect = true;
bool isStub = Subtarget->isTargetMachO() &&
getTargetMachine().getRelocationModel() != Reloc::Static;
-isARMFunc = !Subtarget->isThumb() || isStub;
+isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
// tBX takes a register source operand.
const char *Sym = S->getSymbol();
if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {

View File

@@ -1355,7 +1355,7 @@ def : T1Pat<(ARMtcall texternalsym:$func), (tBL texternalsym:$func)>,
Requires<[IsThumb]>;
def : Tv5Pat<(ARMcall texternalsym:$func), (tBLXi texternalsym:$func)>,
-Requires<[IsThumb, HasV5T]>;
+Requires<[IsThumb, HasV5T, IsNotMClass]>;
// Indirect calls to ARM routines
def : Tv5Pat<(ARMcall GPR:$dst), (tBLXr GPR:$dst)>,
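Note that only the immediate form of BLX is at issue: BLX <Rm> is a valid
Thumb instruction on M-class cores, which is presumably why the tBLXr pattern
shown above is left untouched by this commit. A hedged sketch (illustrative
names, not part of this commit's tests) of an indirect call that may still
legitimately select blx:

; Indirect calls go through tBLXr; the register form of BLX exists
; on M-profile, so this lowering is unaffected by the change.
define void @indirect_call(void ()* %fn) {
  call void %fn()          ; e.g. "blx r0" on cortex-m3
  ret void
}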

View File

@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-T1
-; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs -mcpu=cortex-m0 | FileCheck %s --check-prefix=CHECK-T1
+; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs -mcpu=cortex-m0 | FileCheck %s --check-prefix=CHECK-M0
define void @func(i32 %argc, i8** %argv) nounwind {
entry:
@@ -27,48 +27,56 @@ entry:
; CHECK: add
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_add_4
+; CHECK-M0: bl ___sync_fetch_and_add_4
%0 = atomicrmw add i32* %val1, i32 %tmp monotonic
store i32 %0, i32* %old
; CHECK: ldrex
; CHECK: sub
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_sub_4
+; CHECK-M0: bl ___sync_fetch_and_sub_4
%1 = atomicrmw sub i32* %val2, i32 30 monotonic
store i32 %1, i32* %old
; CHECK: ldrex
; CHECK: add
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_add_4
+; CHECK-M0: bl ___sync_fetch_and_add_4
%2 = atomicrmw add i32* %val2, i32 1 monotonic
store i32 %2, i32* %old
; CHECK: ldrex
; CHECK: sub
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_sub_4
+; CHECK-M0: bl ___sync_fetch_and_sub_4
%3 = atomicrmw sub i32* %val2, i32 1 monotonic
store i32 %3, i32* %old
; CHECK: ldrex
; CHECK: and
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_and_4
+; CHECK-M0: bl ___sync_fetch_and_and_4
%4 = atomicrmw and i32* %andt, i32 4080 monotonic
store i32 %4, i32* %old
; CHECK: ldrex
; CHECK: or
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_or_4
+; CHECK-M0: bl ___sync_fetch_and_or_4
%5 = atomicrmw or i32* %ort, i32 4080 monotonic
store i32 %5, i32* %old
; CHECK: ldrex
; CHECK: eor
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_xor_4
+; CHECK-M0: bl ___sync_fetch_and_xor_4
%6 = atomicrmw xor i32* %xort, i32 4080 monotonic
store i32 %6, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_min_4
+; CHECK-M0: bl ___sync_fetch_and_min_4
%7 = atomicrmw min i32* %val2, i32 16 monotonic
store i32 %7, i32* %old
%neg = sub i32 0, 1
@@ -76,24 +84,28 @@ entry:
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_min_4
+; CHECK-M0: bl ___sync_fetch_and_min_4
%8 = atomicrmw min i32* %val2, i32 %neg monotonic
store i32 %8, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_max_4
+; CHECK-M0: bl ___sync_fetch_and_max_4
%9 = atomicrmw max i32* %val2, i32 1 monotonic
store i32 %9, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_max_4
+; CHECK-M0: bl ___sync_fetch_and_max_4
%10 = atomicrmw max i32* %val2, i32 0 monotonic
store i32 %10, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_4
+; CHECK-M0: bl ___sync_fetch_and_umin_4
%11 = atomicrmw umin i32* %val2, i32 16 monotonic
store i32 %11, i32* %old
%uneg = sub i32 0, 1
@@ -101,18 +113,21 @@ entry:
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_4
+; CHECK-M0: bl ___sync_fetch_and_umin_4
%12 = atomicrmw umin i32* %val2, i32 %uneg monotonic
store i32 %12, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_4
+; CHECK-M0: bl ___sync_fetch_and_umax_4
%13 = atomicrmw umax i32* %val2, i32 1 monotonic
store i32 %13, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_4
+; CHECK-M0: bl ___sync_fetch_and_umax_4
%14 = atomicrmw umax i32* %val2, i32 0 monotonic
store i32 %14, i32* %old
@@ -128,6 +143,7 @@ entry:
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_2
+; CHECK-M0: bl ___sync_fetch_and_umin_2
%0 = atomicrmw umin i16* %val, i16 16 monotonic
store i16 %0, i16* %old
%uneg = sub i16 0, 1
@@ -135,18 +151,21 @@ entry:
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_2
+; CHECK-M0: bl ___sync_fetch_and_umin_2
%1 = atomicrmw umin i16* %val, i16 %uneg monotonic
store i16 %1, i16* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_2
+; CHECK-M0: bl ___sync_fetch_and_umax_2
%2 = atomicrmw umax i16* %val, i16 1 monotonic
store i16 %2, i16* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_2
+; CHECK-M0: bl ___sync_fetch_and_umax_2
%3 = atomicrmw umax i16* %val, i16 0 monotonic
store i16 %3, i16* %old
ret void
@@ -161,12 +180,14 @@ entry:
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_1
+; CHECK-M0: bl ___sync_fetch_and_umin_1
%0 = atomicrmw umin i8* %val, i8 16 monotonic
store i8 %0, i8* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umin_1
+; CHECK-M0: bl ___sync_fetch_and_umin_1
%uneg = sub i8 0, 1
%1 = atomicrmw umin i8* %val, i8 %uneg monotonic
store i8 %1, i8* %old
@@ -174,12 +195,14 @@ entry:
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_1
+; CHECK-M0: bl ___sync_fetch_and_umax_1
%2 = atomicrmw umax i8* %val, i8 1 monotonic
store i8 %2, i8* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
; CHECK-T1: blx ___sync_fetch_and_umax_1
+; CHECK-M0: bl ___sync_fetch_and_umax_1
%3 = atomicrmw umax i8* %val, i8 0 monotonic
store i8 %3, i8* %old
ret void

View File

@@ -7,7 +7,7 @@ define float @float_op(float %lhs, float %rhs) {
%sum = fadd float %lhs, %rhs
ret float %sum
; CHECK-M3-LABEL: float_op:
-; CHECK-M3: blx ___addsf3
+; CHECK-M3: bl ___addsf3
; CHECK-M4-LABEL: float_op:
; CHECK-M4: vadd.f32
@@ -17,8 +17,8 @@ define double @double_op(double %lhs, double %rhs) {
%sum = fadd double %lhs, %rhs
ret double %sum
; CHECK-M3-LABEL: double_op:
-; CHECK-M3: blx ___adddf3
+; CHECK-M3: bl ___adddf3
; CHECK-M4-LABEL: double_op:
-; CHECK-M4: blx ___adddf3
+; CHECK-M4: bl ___adddf3
}

View File

@@ -40,7 +40,7 @@ define arm_aapcscc void @irq_fn() alignstack(8) "interrupt"="IRQ" {
; CHECK-M: mov r4, sp
; CHECK-M: bic r4, r4, #7
; CHECK-M: mov sp, r4
-; CHECK-M: blx _bar
+; CHECK-M: bl _bar
; CHECK-M: sub.w r4, r11, #8
; CHECK-M: mov sp, r4
; CHECK-M: pop.w {r4, r10, r11, pc}

View File

@@ -84,7 +84,7 @@ define float @test_softfloat_calls(float %in) {
; Soft-float calls should be GNU-style rather than RTABI and should not be the
; *vfp variants used for ARMv6 iOS.
-; CHECK: blx ___addsf3{{$}}
+; CHECK: bl ___addsf3{{$}}
ret float %sum
}

View File

@@ -6,7 +6,7 @@
define float @foo(float %a, float %b) {
entry:
; CHECK-LABEL: foo:
-; CORTEXM3: blx ___mulsf3
+; CORTEXM3: bl ___mulsf3
; CORTEXM4: vmul.f32 s
; CORTEXA8: vmul.f32 d
%0 = fmul float %a, %b
@@ -17,8 +17,8 @@ define double @bar(double %a, double %b) {
entry:
; CHECK-LABEL: bar:
%0 = fmul double %a, %b
-; CORTEXM3: blx ___muldf3
-; CORTEXM4: blx ___muldf3
+; CORTEXM3: bl ___muldf3
+; CORTEXM4: bl ___muldf3
; CORTEXA8: vmul.f64 d
ret double %0
}