Tests: Use CHECK-LABEL where possible

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@192403 91177308-0d34-0410-b5e6-96231b3b80d8
Matthias Braun 2013-10-10 22:37:47 +00:00
parent 03d9609c61
commit 82eb6198c8
4 changed files with 15 additions and 15 deletions
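
For context on what the change buys: a plain "; CHECK: f0" matches the string "f0" anywhere in llc's output, so checks intended for one function can accidentally be satisfied by output from another. "; CHECK-LABEL: f0:" makes FileCheck split the output at each line matching the label and confines the neighbouring CHECK/CHECK-NEXT lines to that block, which also gives more local error messages on failure. A minimal sketch of the pattern being adopted, modeled on the f0 test in the second file below (the IR body is illustrative, not taken from this diff):

; RUN: llc < %s -march=arm | FileCheck %s

define i64 @f0(i64 %A, i64 %B) {
; CHECK-LABEL: f0:
; CHECK: lsrs r3, r3, #1
  ; Illustrative body: shift the second i64 right by one and subtract,
  ; which on ARM lowers to an lsrs/rrx pair followed by subs/sbc.
  %shift = lshr i64 %B, 1
  %diff  = sub i64 %A, %shift
  ret i64 %diff
}

With CHECK-LABEL, if the lsrs instruction were emitted by some other function, the check would now fail instead of silently matching.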

@@ -82,7 +82,7 @@ KBBlockZero.exit: ; preds = %bb2.i
 ; <rdar://problem/14379453>
 ; Hard-coded registers comes from the ABI.
-; CHECK: wrapDistance:
+; CHECK-LABEL: wrapDistance:
 ; CHECK: cmp r1, #59
 ; CHECK-NEXT: itt le
 ; CHECK-NEXT: suble r0, r2, #1

@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=arm | FileCheck %s
 define i64 @f0(i64 %A, i64 %B) {
-; CHECK: f0
+; CHECK-LABEL: f0:
 ; CHECK: lsrs r3, r3, #1
 ; CHECK-NEXT: rrx r2, r2
 ; CHECK-NEXT: subs r0, r0, r2
@@ -13,7 +13,7 @@ define i64 @f0(i64 %A, i64 %B) {
 }
 define i32 @f1(i64 %x, i64 %y) {
-; CHECK: f1
+; CHECK-LABEL: f1:
 ; CHECK: lsl{{.*}}r2
 %a = shl i64 %x, %y
 %b = trunc i64 %a to i32
@@ -21,7 +21,7 @@ define i32 @f1(i64 %x, i64 %y) {
 }
 define i32 @f2(i64 %x, i64 %y) {
-; CHECK: f2
+; CHECK-LABEL: f2:
 ; CHECK: lsr{{.*}}r2
 ; CHECK-NEXT: rsb r3, r2, #32
 ; CHECK-NEXT: sub r2, r2, #32
@@ -34,7 +34,7 @@ define i32 @f2(i64 %x, i64 %y) {
 }
 define i32 @f3(i64 %x, i64 %y) {
-; CHECK: f3
+; CHECK-LABEL: f3:
 ; CHECK: lsr{{.*}}r2
 ; CHECK-NEXT: rsb r3, r2, #32
 ; CHECK-NEXT: sub r2, r2, #32

@@ -75,7 +75,7 @@ define double @f7(double %a, double %b) {
 ; into the constant pool based on the value of the "icmp". If we have one "it"
 ; block generated, odds are good that we have close to the ideal code for this:
 ;
-; CHECK-NEON: _f8:
+; CHECK-NEON-LABEL: f8:
 ; CHECK-NEON: movw [[R3:r[0-9]+]], #1123
 ; CHECK-NEON: adr [[R2:r[0-9]+]], LCPI7_0
 ; CHECK-NEON-NEXT: cmp r0, [[R3]]
@@ -113,7 +113,7 @@ entry:
 ret void
 }
-; CHECK: f10
+; CHECK-LABEL: f10:
 define float @f10(i32 %a, i32 %b) nounwind uwtable readnone ssp {
 ; CHECK-NOT: floatsisf
 %1 = icmp eq i32 %a, %b
@@ -122,7 +122,7 @@ define float @f10(i32 %a, i32 %b) nounwind uwtable readnone ssp {
 ret float %3
 }
-; CHECK: f11
+; CHECK-LABEL: f11:
 define float @f11(i32 %a, i32 %b) nounwind uwtable readnone ssp {
 ; CHECK-NOT: floatsisf
 %1 = icmp eq i32 %a, %b
@@ -130,7 +130,7 @@ define float @f11(i32 %a, i32 %b) nounwind uwtable readnone ssp {
 ret float %2
 }
-; CHECK: f12
+; CHECK-LABEL: f12:
 define float @f12(i32 %a, i32 %b) nounwind uwtable readnone ssp {
 ; CHECK-NOT: floatunsisf
 %1 = icmp eq i32 %a, %b

@@ -29,7 +29,7 @@ entry:
 ; Radar 8407927: Make sure that VMOVRRD gets optimized away when the result is
 ; converted back to be used as a vector type.
-; CHECK: test_vmovrrd_combine
+; CHECK-LABEL: test_vmovrrd_combine:
 define <4 x i32> @test_vmovrrd_combine() nounwind {
 entry:
 br i1 undef, label %bb1, label %bb2
@@ -136,7 +136,7 @@ define i16 @foldBuildVectors() {
 ; Test that we are generating vrev and vext for reverse shuffles of v8i16
 ; shuffles.
-; CHECK: reverse_v8i16
+; CHECK-LABEL: reverse_v8i16:
 define void @reverse_v8i16(<8 x i16>* %loadaddr, <8 x i16>* %storeaddr) {
 %v0 = load <8 x i16>* %loadaddr
 ; CHECK: vrev64.16
@@ -149,7 +149,7 @@ define void @reverse_v8i16(<8 x i16>* %loadaddr, <8 x i16>* %storeaddr) {
 ; Test that we are generating vrev and vext for reverse shuffles of v16i8
 ; shuffles.
-; CHECK: reverse_v16i8
+; CHECK-LABEL: reverse_v16i8:
 define void @reverse_v16i8(<16 x i8>* %loadaddr, <16 x i8>* %storeaddr) {
 %v0 = load <16 x i8>* %loadaddr
 ; CHECK: vrev64.8
@@ -165,7 +165,7 @@ define void @reverse_v16i8(<16 x i8>* %loadaddr, <16 x i8>* %storeaddr) {
 ; vldr cannot handle unaligned loads.
 ; Fall back to vld1.32, which can, instead of using the general purpose loads
 ; followed by a costly sequence of instructions to build the vector register.
-; CHECK: t3
+; CHECK-LABEL: t3:
 ; CHECK: vld1.32 {[[REG:d[0-9]+]][0]}
 ; CHECK: vld1.32 {[[REG]][1]}
 ; CHECK: vmull.u8 q{{[0-9]+}}, [[REG]], [[REG]]
@@ -188,7 +188,7 @@ declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>)
 ; Check that (insert_vector_elt (load)) => (vector_load).
 ; Thus, check that scalar_to_vector do not interfer with that.
 define <8 x i16> @t4(i8* nocapture %sp0) {
-; CHECK: t4
+; CHECK-LABEL: t4:
 ; CHECK: vld1.32 {{{d[0-9]+}}[0]}, [r0]
 entry:
 %pix_sp0.0.cast = bitcast i8* %sp0 to i32*
@@ -202,7 +202,7 @@ entry:
 ; Make sure vector load is used for all three loads.
 ; Lowering to build vector was breaking the single use property of the load of
 ; %pix_sp0.0.copyload.
-; CHECK: t5
+; CHECK-LABEL: t5:
 ; CHECK: vld1.32 {[[REG1:d[0-9]+]][1]}, [r0]
 ; CHECK: vorr [[REG2:d[0-9]+]], [[REG1]], [[REG1]]
 ; CHECK: vld1.32 {[[REG1]][0]}, [r1]