diff --git a/test/CodeGen/ARM/reg_sequence.ll b/test/CodeGen/ARM/reg_sequence.ll
index 92c0f0a18ce..206b96cd076 100644
--- a/test/CodeGen/ARM/reg_sequence.ll
+++ b/test/CodeGen/ARM/reg_sequence.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 -regalloc=basic | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 -regalloc=basic | FileCheck %s
 ; Implementing vld / vst as REG_SEQUENCE eliminates the extra vmov's.
 
 %struct.int16x8_t = type { <8 x i16> }
diff --git a/test/CodeGen/ARM/twoaddrinstr.ll b/test/CodeGen/ARM/twoaddrinstr.ll
index 78202bf1855..fc2aa1e568e 100644
--- a/test/CodeGen/ARM/twoaddrinstr.ll
+++ b/test/CodeGen/ARM/twoaddrinstr.ll
@@ -12,10 +12,10 @@ define void @PR13378() nounwind {
 ; CHECK-NEXT: vst1.32
 
 entry:
-  %0 = load <4 x float>* undef
-  store <4 x float> zeroinitializer, <4 x float>* undef
-  store <4 x float> %0, <4 x float>* undef
+  %0 = load <4 x float>* undef, align 4
+  store <4 x float> zeroinitializer, <4 x float>* undef, align 4
+  store <4 x float> %0, <4 x float>* undef, align 4
   %1 = insertelement <4 x float> %0, float 1.000000e+00, i32 3
-  store <4 x float> %1, <4 x float>* undef
+  store <4 x float> %1, <4 x float>* undef, align 4
   unreachable
 }
diff --git a/test/CodeGen/ARM/vbsl-constant.ll b/test/CodeGen/ARM/vbsl-constant.ll
index 75420375274..ffda0a51bdd 100644
--- a/test/CodeGen/ARM/vbsl-constant.ll
+++ b/test/CodeGen/ARM/vbsl-constant.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-ios -mattr=+neon | FileCheck %s
 
 define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK: v_bsli8: