From 771e0ab32a81eb501edcd4027e07c5c14d591a09 Mon Sep 17 00:00:00 2001 From: Stephen Lin Date: Thu, 18 Jul 2013 22:29:15 +0000 Subject: [PATCH] Disambiguate function names in some CodeGen tests. (Some tests were using function names that also were names of instructions and/or doing other unusual things that were making the test not amenable to otherwise scriptable pattern matching.) No functionality change. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@186621 91177308-0d34-0410-b5e6-96231b3b80d8 --- test/CodeGen/ARM/fast-isel.ll | 2 +- test/CodeGen/ARM/umulo-32.ll | 8 +-- test/CodeGen/PowerPC/store-update.ll | 56 ++++++++-------- test/CodeGen/SPARC/2011-01-19-DelaySlot.ll | 4 +- test/CodeGen/SPARC/2011-01-22-SRet.ll | 2 +- test/CodeGen/X86/avx-blend.ll | 16 ++--- test/CodeGen/X86/avx2-arith.ll | 21 +++--- test/CodeGen/X86/fma_patterns.ll | 8 +-- test/CodeGen/X86/inline-asm-fpstack.ll | 2 +- test/CodeGen/X86/jump_sign.ll | 76 +++++++++++----------- test/CodeGen/X86/shift-bmi2.ll | 6 +- test/CodeGen/X86/sse-minmax.ll | 16 ++--- test/CodeGen/X86/widen_load-2.ll | 2 - test/CodeGen/XCore/threads.ll | 32 ++++----- 14 files changed, 125 insertions(+), 126 deletions(-) diff --git a/test/CodeGen/ARM/fast-isel.ll b/test/CodeGen/ARM/fast-isel.ll index 682fa35c12a..0cebc9067be 100644 --- a/test/CodeGen/ARM/fast-isel.ll +++ b/test/CodeGen/ARM/fast-isel.ll @@ -3,7 +3,7 @@ ; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB ; Very basic fast-isel functionality. 
-define i32 @add(i32 %a, i32 %b) nounwind { +define i32 @test0(i32 %a, i32 %b) nounwind { entry: %a.addr = alloca i32, align 4 %b.addr = alloca i32, align 4 diff --git a/test/CodeGen/ARM/umulo-32.ll b/test/CodeGen/ARM/umulo-32.ll index fa5c0168fef..19875ce9407 100644 --- a/test/CodeGen/ARM/umulo-32.ll +++ b/test/CodeGen/ARM/umulo-32.ll @@ -2,8 +2,8 @@ %umul.ty = type { i32, i1 } -define i32 @func(i32 %a) nounwind { -; CHECK: func +define i32 @test1(i32 %a) nounwind { +; CHECK: test1: ; CHECK: muldi3 %tmp0 = tail call %umul.ty @llvm.umul.with.overflow.i32(i32 %a, i32 37) %tmp1 = extractvalue %umul.ty %tmp0, 0 @@ -13,8 +13,8 @@ define i32 @func(i32 %a) nounwind { declare %umul.ty @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone -define i32 @f(i32 %argc, i8** %argv) ssp { -; CHECK: func +define i32 @test2(i32 %argc, i8** %argv) ssp { +; CHECK: test2: ; CHECK: str r0 ; CHECK: movs r2 ; CHECK: mov r1 diff --git a/test/CodeGen/PowerPC/store-update.ll b/test/CodeGen/PowerPC/store-update.ll index 538ed24fbc4..7b9e8f720a1 100644 --- a/test/CodeGen/PowerPC/store-update.ll +++ b/test/CodeGen/PowerPC/store-update.ll @@ -3,166 +3,166 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64" target triple = "powerpc64-unknown-linux-gnu" -define i8* @stbu(i8* %base, i8 zeroext %val) nounwind { +define i8* @test_stbu(i8* %base, i8 zeroext %val) nounwind { entry: %arrayidx = getelementptr inbounds i8* %base, i64 16 store i8 %val, i8* %arrayidx, align 1 ret i8* %arrayidx } -; CHECK: @stbu +; CHECK: @test_stbu ; CHECK: %entry ; CHECK-NEXT: stbu ; CHECK-NEXT: blr -define i8* @stbux(i8* %base, i8 zeroext %val, i64 %offset) nounwind { +define i8* @test_stbux(i8* %base, i8 zeroext %val, i64 %offset) nounwind { entry: %arrayidx = getelementptr inbounds i8* %base, i64 %offset store i8 %val, i8* %arrayidx, align 1 ret i8* %arrayidx } -; CHECK: @stbux +; CHECK: @test_stbux ; CHECK: %entry ; CHECK-NEXT: stbux 
; CHECK-NEXT: blr -define i16* @sthu(i16* %base, i16 zeroext %val) nounwind { +define i16* @test_sthu(i16* %base, i16 zeroext %val) nounwind { entry: %arrayidx = getelementptr inbounds i16* %base, i64 16 store i16 %val, i16* %arrayidx, align 2 ret i16* %arrayidx } -; CHECK: @sthu +; CHECK: @test_sthu ; CHECK: %entry ; CHECK-NEXT: sthu ; CHECK-NEXT: blr -define i16* @sthux(i16* %base, i16 zeroext %val, i64 %offset) nounwind { +define i16* @test_sthux(i16* %base, i16 zeroext %val, i64 %offset) nounwind { entry: %arrayidx = getelementptr inbounds i16* %base, i64 %offset store i16 %val, i16* %arrayidx, align 2 ret i16* %arrayidx } -; CHECK: @sthux +; CHECK: @test_sthux ; CHECK: %entry ; CHECK-NEXT: sldi ; CHECK-NEXT: sthux ; CHECK-NEXT: blr -define i32* @stwu(i32* %base, i32 zeroext %val) nounwind { +define i32* @test_stwu(i32* %base, i32 zeroext %val) nounwind { entry: %arrayidx = getelementptr inbounds i32* %base, i64 16 store i32 %val, i32* %arrayidx, align 4 ret i32* %arrayidx } -; CHECK: @stwu +; CHECK: @test_stwu ; CHECK: %entry ; CHECK-NEXT: stwu ; CHECK-NEXT: blr -define i32* @stwux(i32* %base, i32 zeroext %val, i64 %offset) nounwind { +define i32* @test_stwux(i32* %base, i32 zeroext %val, i64 %offset) nounwind { entry: %arrayidx = getelementptr inbounds i32* %base, i64 %offset store i32 %val, i32* %arrayidx, align 4 ret i32* %arrayidx } -; CHECK: @stwux +; CHECK: @test_stwux ; CHECK: %entry ; CHECK-NEXT: sldi ; CHECK-NEXT: stwux ; CHECK-NEXT: blr -define i8* @stbu8(i8* %base, i64 %val) nounwind { +define i8* @test_stbu8(i8* %base, i64 %val) nounwind { entry: %conv = trunc i64 %val to i8 %arrayidx = getelementptr inbounds i8* %base, i64 16 store i8 %conv, i8* %arrayidx, align 1 ret i8* %arrayidx } -; CHECK: @stbu +; CHECK: @test_stbu8 ; CHECK: %entry ; CHECK-NEXT: stbu ; CHECK-NEXT: blr -define i8* @stbux8(i8* %base, i64 %val, i64 %offset) nounwind { +define i8* @test_stbux8(i8* %base, i64 %val, i64 %offset) nounwind { entry: %conv = trunc i64 %val to i8 
%arrayidx = getelementptr inbounds i8* %base, i64 %offset store i8 %conv, i8* %arrayidx, align 1 ret i8* %arrayidx } -; CHECK: @stbux +; CHECK: @test_stbux8 ; CHECK: %entry ; CHECK-NEXT: stbux ; CHECK-NEXT: blr -define i16* @sthu8(i16* %base, i64 %val) nounwind { +define i16* @test_sthu8(i16* %base, i64 %val) nounwind { entry: %conv = trunc i64 %val to i16 %arrayidx = getelementptr inbounds i16* %base, i64 16 store i16 %conv, i16* %arrayidx, align 2 ret i16* %arrayidx } -; CHECK: @sthu +; CHECK: @test_sthu8 ; CHECK: %entry ; CHECK-NEXT: sthu ; CHECK-NEXT: blr -define i16* @sthux8(i16* %base, i64 %val, i64 %offset) nounwind { +define i16* @test_sthux8(i16* %base, i64 %val, i64 %offset) nounwind { entry: %conv = trunc i64 %val to i16 %arrayidx = getelementptr inbounds i16* %base, i64 %offset store i16 %conv, i16* %arrayidx, align 2 ret i16* %arrayidx } -; CHECK: @sthux +; CHECK: @test_sthux8 ; CHECK: %entry ; CHECK-NEXT: sldi ; CHECK-NEXT: sthux ; CHECK-NEXT: blr -define i32* @stwu8(i32* %base, i64 %val) nounwind { +define i32* @test_stwu8(i32* %base, i64 %val) nounwind { entry: %conv = trunc i64 %val to i32 %arrayidx = getelementptr inbounds i32* %base, i64 16 store i32 %conv, i32* %arrayidx, align 4 ret i32* %arrayidx } -; CHECK: @stwu +; CHECK: @test_stwu8 ; CHECK: %entry ; CHECK-NEXT: stwu ; CHECK-NEXT: blr -define i32* @stwux8(i32* %base, i64 %val, i64 %offset) nounwind { +define i32* @test_stwux8(i32* %base, i64 %val, i64 %offset) nounwind { entry: %conv = trunc i64 %val to i32 %arrayidx = getelementptr inbounds i32* %base, i64 %offset store i32 %conv, i32* %arrayidx, align 4 ret i32* %arrayidx } -; CHECK: @stwux +; CHECK: @test_stwux8 ; CHECK: %entry ; CHECK-NEXT: sldi ; CHECK-NEXT: stwux ; CHECK-NEXT: blr -define i64* @stdu(i64* %base, i64 %val) nounwind { +define i64* @test_stdu(i64* %base, i64 %val) nounwind { entry: %arrayidx = getelementptr inbounds i64* %base, i64 16 store i64 %val, i64* %arrayidx, align 8 ret i64* %arrayidx } -; CHECK: @stdu +; CHECK: 
@test_stdu ; CHECK: %entry ; CHECK-NEXT: stdu ; CHECK-NEXT: blr -define i64* @stdux(i64* %base, i64 %val, i64 %offset) nounwind { +define i64* @test_stdux(i64* %base, i64 %val, i64 %offset) nounwind { entry: %arrayidx = getelementptr inbounds i64* %base, i64 %offset store i64 %val, i64* %arrayidx, align 8 ret i64* %arrayidx } -; CHECK: @stdux +; CHECK: @test_stdux ; CHECK: %entry ; CHECK-NEXT: sldi ; CHECK-NEXT: stdux diff --git a/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll b/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll index dc12b975a88..401d902cc0e 100644 --- a/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll +++ b/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll @@ -161,7 +161,7 @@ entry: define i32 @restore_sethi_3bit(i32 %a) { entry: -;CHECK: restore_sethi +;CHECK: restore_sethi_3bit ;CHECK: sethi 6 ;CHECK-NOT: restore %g0, 6144, %o0 %0 = tail call i32 @bar(i32 %a) nounwind @@ -172,7 +172,7 @@ entry: define i32 @restore_sethi_large(i32 %a) { entry: -;CHECK: restore_sethi +;CHECK: restore_sethi_large ;CHECK: sethi 4000, %i0 ;CHECK: restore %g0, %g0, %g0 %0 = tail call i32 @bar(i32 %a) nounwind diff --git a/test/CodeGen/SPARC/2011-01-22-SRet.ll b/test/CodeGen/SPARC/2011-01-22-SRet.ll index 942971b92fe..c6a1dc9eba6 100644 --- a/test/CodeGen/SPARC/2011-01-22-SRet.ll +++ b/test/CodeGen/SPARC/2011-01-22-SRet.ll @@ -20,7 +20,7 @@ define i32 @test() nounwind { entry: ;CHECK: test ;CHECK: st {{.+}}, [%sp+64] -;CHECK: make_foo +;CHECK: call make_foo ;CHECK: unimp 12 %f = alloca %struct.foo_t, align 8 call void @make_foo(%struct.foo_t* noalias sret %f, i32 10, i32 20, i32 30) nounwind diff --git a/test/CodeGen/X86/avx-blend.ll b/test/CodeGen/X86/avx-blend.ll index 188efe26d92..224cc50b076 100644 --- a/test/CodeGen/X86/avx-blend.ll +++ b/test/CodeGen/X86/avx-blend.ll @@ -50,7 +50,7 @@ define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) { ; AVX256 tests: -;CHECK: vsel_float +;CHECK: vsel_float8 ;CHECK: vblendvps ;CHECK: ret define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> 
%v2) { @@ -58,7 +58,7 @@ define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) { ret <8 x float> %vsel } -;CHECK: vsel_i32 +;CHECK: vsel_i328 ;CHECK: vblendvps ;CHECK: ret define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) { @@ -66,7 +66,7 @@ define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) { ret <8 x i32> %vsel } -;CHECK: vsel_double +;CHECK: vsel_double8 ;CHECK: vblendvpd ;CHECK: ret define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) { @@ -74,7 +74,7 @@ define <8 x double> @vsel_double8(<8 x double> %v1, <8 x double> %v2) { ret <8 x double> %vsel } -;CHECK: vsel_i64 +;CHECK: vsel_i648 ;CHECK: vblendvpd ;CHECK: ret define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) { @@ -83,8 +83,8 @@ define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) { } ;; TEST blend + compares -; CHECK: A -define <2 x double> @A(<2 x double> %x, <2 x double> %y) { +; CHECK: testa +define <2 x double> @testa(<2 x double> %x, <2 x double> %y) { ; CHECK: vcmplepd ; CHECK: vblendvpd %max_is_x = fcmp oge <2 x double> %x, %y @@ -92,8 +92,8 @@ define <2 x double> @A(<2 x double> %x, <2 x double> %y) { ret <2 x double> %max } -; CHECK: B -define <2 x double> @B(<2 x double> %x, <2 x double> %y) { +; CHECK: testb +define <2 x double> @testb(<2 x double> %x, <2 x double> %y) { ; CHECK: vcmpnlepd ; CHECK: vblendvpd %min_is_x = fcmp ult <2 x double> %x, %y diff --git a/test/CodeGen/X86/avx2-arith.ll b/test/CodeGen/X86/avx2-arith.ll index dee4bd3c6bb..997fa190eca 100644 --- a/test/CodeGen/X86/avx2-arith.ll +++ b/test/CodeGen/X86/avx2-arith.ll @@ -1,65 +1,66 @@ ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s ; CHECK: vpaddq %ymm -define <4 x i64> @vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone { +define <4 x i64> @test_vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone { %x = add <4 x i64> %i, %j ret <4 x i64> %x } ; CHECK: vpaddd %ymm -define <8 x i32> @vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind 
readnone { +define <8 x i32> @test_vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone { %x = add <8 x i32> %i, %j ret <8 x i32> %x } ; CHECK: vpaddw %ymm -define <16 x i16> @vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone { +define <16 x i16> @test_vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone { %x = add <16 x i16> %i, %j ret <16 x i16> %x } ; CHECK: vpaddb %ymm -define <32 x i8> @vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone { +define <32 x i8> @test_vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone { %x = add <32 x i8> %i, %j ret <32 x i8> %x } ; CHECK: vpsubq %ymm -define <4 x i64> @vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone { +define <4 x i64> @test_vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone { %x = sub <4 x i64> %i, %j ret <4 x i64> %x } ; CHECK: vpsubd %ymm -define <8 x i32> @vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone { +define <8 x i32> @test_vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone { %x = sub <8 x i32> %i, %j ret <8 x i32> %x } ; CHECK: vpsubw %ymm -define <16 x i16> @vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone { +define <16 x i16> @test_vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone { %x = sub <16 x i16> %i, %j ret <16 x i16> %x } ; CHECK: vpsubb %ymm -define <32 x i8> @vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone { +define <32 x i8> @test_vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone { %x = sub <32 x i8> %i, %j ret <32 x i8> %x } ; CHECK: vpmulld %ymm -define <8 x i32> @vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone { +define <8 x i32> @test_vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone { %x = mul <8 x i32> %i, %j ret <8 x i32> %x } ; CHECK: vpmullw %ymm -define <16 x i16> @vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone { +define <16 x i16> @test_vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone { %x = mul <16 x i16> %i, %j ret <16 x i16> %x } +; CHECK: mul-v4i64 ; CHECK: vpmuludq %ymm ; CHECK-NEXT: vpsrlq $32, %ymm ; 
CHECK-NEXT: vpmuludq %ymm diff --git a/test/CodeGen/X86/fma_patterns.ll b/test/CodeGen/X86/fma_patterns.ll index 6d98d59b382..cfb598df634 100644 --- a/test/CodeGen/X86/fma_patterns.ll +++ b/test/CodeGen/X86/fma_patterns.ll @@ -182,11 +182,11 @@ define float @test_x86_fnmsub_ss(float %a0, float %a1, float %a2) { ret float %res } -; CHECK: test_x86_fmadd_ps +; CHECK: test_x86_fmadd_ps_load ; CHECK: vmovaps (%rdi), %xmm2 ; CHECK: vfmadd213ps %xmm1, %xmm0, %xmm2 ; CHECK: ret -; CHECK_FMA4: test_x86_fmadd_ps +; CHECK_FMA4: test_x86_fmadd_ps_load ; CHECK_FMA4: vfmaddps %xmm1, (%rdi), %xmm0, %xmm0 ; CHECK_FMA4: ret define <4 x float> @test_x86_fmadd_ps_load(<4 x float>* %a0, <4 x float> %a1, <4 x float> %a2) { @@ -196,11 +196,11 @@ define <4 x float> @test_x86_fmadd_ps_load(<4 x float>* %a0, <4 x float> %a1, <4 ret <4 x float> %res } -; CHECK: test_x86_fmsub_ps +; CHECK: test_x86_fmsub_ps_load ; CHECK: vmovaps (%rdi), %xmm2 ; CHECK: fmsub213ps %xmm1, %xmm0, %xmm2 ; CHECK: ret -; CHECK_FMA4: test_x86_fmsub_ps +; CHECK_FMA4: test_x86_fmsub_ps_load ; CHECK_FMA4: vfmsubps %xmm1, (%rdi), %xmm0, %xmm0 ; CHECK_FMA4: ret define <4 x float> @test_x86_fmsub_ps_load(<4 x float>* %a0, <4 x float> %a1, <4 x float> %a2) { diff --git a/test/CodeGen/X86/inline-asm-fpstack.ll b/test/CodeGen/X86/inline-asm-fpstack.ll index 2249618c8a9..e83c065632d 100644 --- a/test/CodeGen/X86/inline-asm-fpstack.ll +++ b/test/CodeGen/X86/inline-asm-fpstack.ll @@ -147,7 +147,7 @@ declare x86_fp80 @ceil(x86_fp80) ; PR4484 ; test1 leaves a value on the stack that is needed after the asm. 
; CHECK: testPR4484 -; CHECK: test1 +; CHECK: calll _test1 ; CHECK-NOT: fstp ; Load %a from stack after ceil ; CHECK: fldt diff --git a/test/CodeGen/X86/jump_sign.ll b/test/CodeGen/X86/jump_sign.ll index 3d3a9a94e64..d4174539f2f 100644 --- a/test/CodeGen/X86/jump_sign.ll +++ b/test/CodeGen/X86/jump_sign.ll @@ -1,8 +1,8 @@ ; RUN: llc < %s -march=x86 -mcpu=pentiumpro -verify-machineinstrs | FileCheck %s -define i32 @f(i32 %X) { +define i32 @func_f(i32 %X) { entry: -; CHECK-LABEL: f: +; CHECK-LABEL: func_f: ; CHECK: jns %tmp1 = add i32 %X, 1 ; [#uses=1] %tmp = icmp slt i32 %tmp1, 0 ; [#uses=1] @@ -23,9 +23,9 @@ declare i32 @baz(...) ; rdar://10633221 ; rdar://11355268 -define i32 @g(i32 %a, i32 %b) nounwind { +define i32 @func_g(i32 %a, i32 %b) nounwind { entry: -; CHECK-LABEL: g: +; CHECK-LABEL: func_g: ; CHECK-NOT: test ; CHECK: cmovs %sub = sub nsw i32 %a, %b @@ -35,9 +35,9 @@ entry: } ; rdar://10734411 -define i32 @h(i32 %a, i32 %b) nounwind { +define i32 @func_h(i32 %a, i32 %b) nounwind { entry: -; CHECK-LABEL: h: +; CHECK-LABEL: func_h: ; CHECK-NOT: cmp ; CHECK: cmov ; CHECK-NOT: movl @@ -47,9 +47,9 @@ entry: %cond = select i1 %cmp, i32 %sub, i32 0 ret i32 %cond } -define i32 @i(i32 %a, i32 %b) nounwind { +define i32 @func_i(i32 %a, i32 %b) nounwind { entry: -; CHECK-LABEL: i: +; CHECK-LABEL: func_i: ; CHECK-NOT: cmp ; CHECK: cmov ; CHECK-NOT: movl @@ -59,9 +59,9 @@ entry: %cond = select i1 %cmp, i32 %sub, i32 0 ret i32 %cond } -define i32 @j(i32 %a, i32 %b) nounwind { +define i32 @func_j(i32 %a, i32 %b) nounwind { entry: -; CHECK-LABEL: j: +; CHECK-LABEL: func_j: ; CHECK-NOT: cmp ; CHECK: cmov ; CHECK-NOT: movl @@ -71,9 +71,9 @@ entry: %cond = select i1 %cmp, i32 %sub, i32 0 ret i32 %cond } -define i32 @k(i32 %a, i32 %b) nounwind { +define i32 @func_k(i32 %a, i32 %b) nounwind { entry: -; CHECK-LABEL: k: +; CHECK-LABEL: func_k: ; CHECK-NOT: cmp ; CHECK: cmov ; CHECK-NOT: movl @@ -84,18 +84,18 @@ entry: ret i32 %cond } ; redundant cmp instruction -define i32 
@l(i32 %a, i32 %b) nounwind { +define i32 @func_l(i32 %a, i32 %b) nounwind { entry: -; CHECK-LABEL: l: +; CHECK-LABEL: func_l: ; CHECK-NOT: cmp %cmp = icmp slt i32 %b, %a %sub = sub nsw i32 %a, %b %cond = select i1 %cmp, i32 %sub, i32 %a ret i32 %cond } -define i32 @m(i32 %a, i32 %b) nounwind { +define i32 @func_m(i32 %a, i32 %b) nounwind { entry: -; CHECK-LABEL: m: +; CHECK-LABEL: func_m: ; CHECK-NOT: cmp %cmp = icmp sgt i32 %a, %b %sub = sub nsw i32 %a, %b @@ -104,9 +104,9 @@ entry: } ; If EFLAGS is live-out, we can't remove cmp if there exists ; a swapped sub. -define i32 @l2(i32 %a, i32 %b) nounwind { +define i32 @func_l2(i32 %a, i32 %b) nounwind { entry: -; CHECK-LABEL: l2: +; CHECK-LABEL: func_l2: ; CHECK: cmp %cmp = icmp eq i32 %b, %a %sub = sub nsw i32 %a, %b @@ -120,9 +120,9 @@ if.then: if.else: ret i32 %sub } -define i32 @l3(i32 %a, i32 %b) nounwind { +define i32 @func_l3(i32 %a, i32 %b) nounwind { entry: -; CHECK-LABEL: l3: +; CHECK-LABEL: func_l3: ; CHECK: sub ; CHECK-NOT: cmp ; CHECK: jge @@ -139,9 +139,9 @@ if.else: } ; rdar://11830760 ; When Movr0 is between sub and cmp, we need to move "Movr0" before sub. 
-define i32 @l4(i32 %a, i32 %b) nounwind { +define i32 @func_l4(i32 %a, i32 %b) nounwind { entry: -; CHECK-LABEL: l4: +; CHECK-LABEL: func_l4: ; CHECK: xor ; CHECK: sub ; CHECK-NOT: cmp @@ -151,9 +151,9 @@ entry: ret i32 %.sub } ; rdar://11540023 -define i32 @n(i32 %x, i32 %y) nounwind { +define i32 @func_n(i32 %x, i32 %y) nounwind { entry: -; CHECK-LABEL: n: +; CHECK-LABEL: func_n: ; CHECK-NOT: sub ; CHECK: cmp %sub = sub nsw i32 %x, %y @@ -162,7 +162,7 @@ entry: ret i32 %y.x } ; PR://13046 -define void @o() nounwind uwtable { +define void @func_o() nounwind uwtable { entry: %0 = load i16* undef, align 2 br i1 undef, label %if.then.i, label %if.end.i @@ -177,7 +177,7 @@ sw.bb: ; preds = %if.end.i br i1 undef, label %if.then44, label %if.end29 if.end29: ; preds = %sw.bb -; CHECK-LABEL: o: +; CHECK-LABEL: func_o: ; CHECK: cmp %1 = urem i16 %0, 10 %cmp25 = icmp eq i16 %1, 0 @@ -204,9 +204,9 @@ if.else.i104: ; preds = %if.then44 ret void } ; rdar://11855129 -define i32 @p(i32 %a, i32 %b) nounwind { +define i32 @func_p(i32 %a, i32 %b) nounwind { entry: -; CHECK-LABEL: p: +; CHECK-LABEL: func_p: ; CHECK-NOT: test ; CHECK: cmovs %add = add nsw i32 %b, %a @@ -217,8 +217,8 @@ entry: ; PR13475 ; If we have sub a, b and cmp b, a and the result of cmp is used ; by sbb, we should not optimize cmp away. -define i32 @q(i32 %j.4, i32 %w, i32 %el) { -; CHECK-LABEL: q: +define i32 @func_q(i32 %j.4, i32 %w, i32 %el) { +; CHECK-LABEL: func_q: ; CHECK: cmp ; CHECK-NEXT: sbb %tmp532 = add i32 %j.4, %w @@ -230,9 +230,9 @@ define i32 @q(i32 %j.4, i32 %w, i32 %el) { ret i32 %j.5 } ; rdar://11873276 -define i8* @r(i8* %base, i32* nocapture %offset, i32 %size) nounwind { +define i8* @func_r(i8* %base, i32* nocapture %offset, i32 %size) nounwind { entry: -; CHECK-LABEL: r: +; CHECK-LABEL: func_r: ; CHECK: sub ; CHECK-NOT: cmp ; CHECK: j @@ -254,9 +254,9 @@ return: } ; Test optimizations of dec/inc. 
-define i32 @dec(i32 %a) nounwind { +define i32 @func_dec(i32 %a) nounwind { entry: -; CHECK-LABEL: dec: +; CHECK-LABEL: func_dec: ; CHECK: decl ; CHECK-NOT: test ; CHECK: cmovsl @@ -266,9 +266,9 @@ entry: ret i32 %cond } -define i32 @inc(i32 %a) nounwind { +define i32 @func_inc(i32 %a) nounwind { entry: -; CHECK-LABEL: inc: +; CHECK-LABEL: func_inc: ; CHECK: incl ; CHECK-NOT: test ; CHECK: cmovsl @@ -281,9 +281,9 @@ entry: ; PR13966 @b = common global i32 0, align 4 @a = common global i32 0, align 4 -define i32 @test1(i32 %p1) nounwind uwtable { +define i32 @func_test1(i32 %p1) nounwind uwtable { entry: -; CHECK-LABEL: test1: +; CHECK-LABEL: func_test1: ; CHECK: testb ; CHECK: j ; CHECK: ret diff --git a/test/CodeGen/X86/shift-bmi2.ll b/test/CodeGen/X86/shift-bmi2.ll index d1f321f1773..01167893a89 100644 --- a/test/CodeGen/X86/shift-bmi2.ll +++ b/test/CodeGen/X86/shift-bmi2.ll @@ -83,7 +83,7 @@ define i64 @shl64pi(i64* %p) nounwind uwtable readnone { entry: %x = load i64* %p %shl = shl i64 %x, 7 -; BMI264: shl64p +; BMI264: shl64pi ; BMI264-NOT: shlxq ; BMI264: ret ret i64 %shl @@ -108,7 +108,7 @@ entry: ; BMI2: lshr32p ; BMI2: shrxl %{{.+}}, ({{.+}}), %{{.+}} ; BMI2: ret -; BMI264: lshr32 +; BMI264: lshr32p ; BMI264: shrxl %{{.+}}, ({{.+}}), %{{.+}} ; BMI264: ret ret i32 %shl @@ -152,7 +152,7 @@ entry: ; BMI2: ashr32p ; BMI2: sarxl %{{.+}}, ({{.+}}), %{{.+}} ; BMI2: ret -; BMI264: ashr32 +; BMI264: ashr32p ; BMI264: sarxl %{{.+}}, ({{.+}}), %{{.+}} ; BMI264: ret ret i32 %shl diff --git a/test/CodeGen/X86/sse-minmax.ll b/test/CodeGen/X86/sse-minmax.ll index 7a2ea6bb3fb..5fa0ca3226c 100644 --- a/test/CodeGen/X86/sse-minmax.ll +++ b/test/CodeGen/X86/sse-minmax.ll @@ -915,33 +915,33 @@ entry: ret double %x_addr.0 } -; UNSAFE-LABEL: maxpd: +; UNSAFE-LABEL: test_maxpd: ; UNSAFE: maxpd -define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) { +define <2 x double> @test_maxpd(<2 x double> %x, <2 x double> %y) { %max_is_x = fcmp oge <2 x double> %x, %y %max = select 
<2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y ret <2 x double> %max } -; UNSAFE-LABEL: minpd: +; UNSAFE-LABEL: test_minpd: ; UNSAFE: minpd -define <2 x double> @minpd(<2 x double> %x, <2 x double> %y) { +define <2 x double> @test_minpd(<2 x double> %x, <2 x double> %y) { %min_is_x = fcmp ole <2 x double> %x, %y %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y ret <2 x double> %min } -; UNSAFE-LABEL: maxps: +; UNSAFE-LABEL: test_maxps: ; UNSAFE: maxps -define <4 x float> @maxps(<4 x float> %x, <4 x float> %y) { +define <4 x float> @test_maxps(<4 x float> %x, <4 x float> %y) { %max_is_x = fcmp oge <4 x float> %x, %y %max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y ret <4 x float> %max } -; UNSAFE-LABEL: minps: +; UNSAFE-LABEL: test_minps: ; UNSAFE: minps -define <4 x float> @minps(<4 x float> %x, <4 x float> %y) { +define <4 x float> @test_minps(<4 x float> %x, <4 x float> %y) { %min_is_x = fcmp ole <4 x float> %x, %y %min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y ret <4 x float> %min diff --git a/test/CodeGen/X86/widen_load-2.ll b/test/CodeGen/X86/widen_load-2.ll index 2781dcfb4c2..f0f94e47921 100644 --- a/test/CodeGen/X86/widen_load-2.ll +++ b/test/CodeGen/X86/widen_load-2.ll @@ -73,7 +73,6 @@ define void @add12i32(%i32vec12* sret %ret, %i32vec12* %ap, %i32vec12* %bp) { ; CHECK: add3i16 %i16vec3 = type <3 x i16> define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp) nounwind { -; CHECK: add3i16 ; CHECK: addl ; CHECK: addl ; CHECK: addl @@ -88,7 +87,6 @@ define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp ; CHECK: add4i16 %i16vec4 = type <4 x i16> define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp) nounwind { -; CHECK: add4i16 ; CHECK: paddd ; CHECK: movq %a = load %i16vec4* %ap, align 16 diff --git a/test/CodeGen/XCore/threads.ll b/test/CodeGen/XCore/threads.ll index 960262312d3..5840e777618 100644 --- 
a/test/CodeGen/XCore/threads.ll +++ b/test/CodeGen/XCore/threads.ll @@ -10,57 +10,57 @@ declare void @llvm.xcore.initlr.p1i8(i8 addrspace(1)* %r, i8* %value) declare void @llvm.xcore.initcp.p1i8(i8 addrspace(1)* %r, i8* %value) declare void @llvm.xcore.initdp.p1i8(i8 addrspace(1)* %r, i8* %value) -define i8 addrspace(1)* @getst(i8 addrspace(1)* %r) { -; CHECK-LABEL: getst: +define i8 addrspace(1)* @test_getst(i8 addrspace(1)* %r) { +; CHECK-LABEL: test_getst: ; CHECK: getst r0, res[r0] %result = call i8 addrspace(1)* @llvm.xcore.getst.p1i8.p1i8(i8 addrspace(1)* %r) ret i8 addrspace(1)* %result } -define void @ssync() { -; CHECK-LABEL: ssync: +define void @test_ssync() { +; CHECK-LABEL: test_ssync: ; CHECK: ssync call void @llvm.xcore.ssync() ret void } -define void @mjoin(i8 addrspace(1)* %r) { -; CHECK-LABEL: mjoin: +define void @test_mjoin(i8 addrspace(1)* %r) { +; CHECK-LABEL: test_mjoin: ; CHECK: mjoin res[r0] call void @llvm.xcore.mjoin.p1i8(i8 addrspace(1)* %r) ret void } -define void @initsp(i8 addrspace(1)* %t, i8* %src) { -; CHECK-LABEL: initsp: +define void @test_initsp(i8 addrspace(1)* %t, i8* %src) { +; CHECK-LABEL: test_initsp: ; CHECK: init t[r0]:sp, r1 call void @llvm.xcore.initsp.p1i8(i8 addrspace(1)* %t, i8* %src) ret void } -define void @initpc(i8 addrspace(1)* %t, i8* %src) { -; CHECK-LABEL: initpc: +define void @test_initpc(i8 addrspace(1)* %t, i8* %src) { +; CHECK-LABEL: test_initpc: ; CHECK: init t[r0]:pc, r1 call void @llvm.xcore.initpc.p1i8(i8 addrspace(1)* %t, i8* %src) ret void } -define void @initlr(i8 addrspace(1)* %t, i8* %src) { -; CHECK-LABEL: initlr: +define void @test_initlr(i8 addrspace(1)* %t, i8* %src) { +; CHECK-LABEL: test_initlr: ; CHECK: init t[r0]:lr, r1 call void @llvm.xcore.initlr.p1i8(i8 addrspace(1)* %t, i8* %src) ret void } -define void @initcp(i8 addrspace(1)* %t, i8* %src) { -; CHECK-LABEL: initcp: +define void @test_initcp(i8 addrspace(1)* %t, i8* %src) { +; CHECK-LABEL: test_initcp: ; CHECK: init t[r0]:cp, r1 call 
void @llvm.xcore.initcp.p1i8(i8 addrspace(1)* %t, i8* %src) ret void } -define void @initdp(i8 addrspace(1)* %t, i8* %src) { -; CHECK-LABEL: initdp: +define void @test_initdp(i8 addrspace(1)* %t, i8* %src) { +; CHECK-LABEL: test_initdp: ; CHECK: init t[r0]:dp, r1 call void @llvm.xcore.initdp.p1i8(i8 addrspace(1)* %t, i8* %src) ret void