test/CodeGen/X86: FileCheck-ize and add explicit -mtriple=x86_64-linux. They are not applicable to the Win64 target.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@127732 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
NAKAMURA Takumi 2011-03-16 13:52:38 +00:00
parent 4491aa49b3
commit ddbfbcf72e
12 changed files with 62 additions and 24 deletions

View File

@@ -1,5 +1,10 @@
; RUN: llc < %s -march=x86-64 | grep paddw | count 2
; RUN: llc < %s -march=x86-64 | not grep mov
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; CHECK-NOT: mov
; CHECK: paddw
; CHECK-NOT: mov
; CHECK: paddw
; CHECK-NOT: paddw
; CHECK-NOT: mov
; The 2-addr pass should ensure that identical code is produced for these functions
; no extra copy should be generated.

View File

@@ -1,5 +1,6 @@
; RUN: llc < %s -march=x86 | not grep lea
; RUN: llc < %s -march=x86-64 | not grep lea
; RUN: llc < %s -march=x86 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; CHECK-NOT: lea
@B = external global [1000 x i8], align 32
@A = external global [1000 x i8], align 32

View File

@@ -1,6 +1,13 @@
; RUN: llc < %s -march=x86-64 | grep min | count 1
; RUN: llc < %s -march=x86-64 | grep max | count 1
; RUN: llc < %s -march=x86-64 | grep mov | count 2
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; CHECK-NOT: {{(min|max|mov)}}
; CHECK: mov
; CHECK-NOT: {{(min|max|mov)}}
; CHECK: min
; CHECK-NOT: {{(min|max|mov)}}
; CHECK: mov
; CHECK-NOT: {{(min|max|mov)}}
; CHECK: max
; CHECK-NOT: {{(min|max|mov)}}
declare float @bar()

View File

@@ -1,4 +1,5 @@
; RUN: llc < %s -march=x86-64 | not grep mov
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; CHECK-NOT: mov
define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
%t = load <4 x float>* %p

View File

@@ -1,4 +1,9 @@
; RUN: llc < %s -march=x86-64 | grep movap | count 2
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; CHECK-NOT: movapd
; CHECK: movaps
; CHECK-NOT: movaps
; CHECK: movapd
; CHECK-NOT: movap
define void @foo(<4 x float>* %p, <4 x float> %x) nounwind {
store <4 x float> %x, <4 x float>* %p

View File

@@ -1,4 +1,6 @@
; RUN: llc < %s -march=x86-64 | grep movaps | count 1
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; CHECK: movaps
; CHECK-NOT: movaps
define void @bar(<2 x i64>* %p, <2 x i64> %x) nounwind {
store <2 x i64> %x, <2 x i64>* %p

View File

@@ -1,4 +1,4 @@
; RUN: llc -march=x86-64 < %s | FileCheck %s
; RUN: llc -mtriple=x86_64-linux < %s | FileCheck %s
; Commute the comparison to avoid a move.
; PR7500.

View File

@@ -1,5 +1,6 @@
; RUN: llc < %s -march=x86-64 -mattr=+64bit,+sse3 -print-failed-fuse-candidates |& \
; RUN: grep fail | count 1
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+64bit,+sse3 -print-failed-fuse-candidates |& FileCheck %s
; CHECK: fail
; CHECK-NOT: fail
declare float @test_f(float %f)
declare double @test_d(double %f)

View File

@@ -1,4 +1,5 @@
; RUN: llc < %s -march=x86-64 | grep {testb \[%\]al, \[%\]al}
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; CHECK: testb %al, %al
%struct.__va_list_tag = type { i32, i32, i8*, i8* }

View File

@@ -1,5 +1,6 @@
; RUN: llc < %s -march=x86 -relocation-model=static | not grep lea
; RUN: llc < %s -march=x86-64 | not grep lea
; RUN: llc < %s -march=x86 -relocation-model=static | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; CHECK-NOT: lea
; P should be sunk into the loop and folded into the address mode. There
; shouldn't be any lea instructions inside the loop.

View File

@@ -1,5 +1,6 @@
; RUN: llc < %s -march=x86 | not grep lea
; RUN: llc < %s -march=x86-64 | not grep lea
; RUN: llc < %s -march=x86 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; CHECK-NOT: lea
@B = external global [1000 x float], align 32
@A = external global [1000 x float], align 32

View File

@@ -1,15 +1,30 @@
; RUN: llc < %s -march=x86-64 -tailcallopt | grep TAILCALL
; RUN: llc < %s -mtriple=x86_64-linux -tailcallopt | FileCheck %s
; FIXME: Win64 does not support byval.
; Expect the entry point.
; CHECK: tailcaller:
; Expect 2 rep;movs because of tail call byval lowering.
; RUN: llc < %s -march=x86-64 -tailcallopt | grep rep | wc -l | grep 2
; CHECK: rep;
; CHECK: rep;
; A sequence of copyto/copyfrom virtual registers is used to deal with byval
; lowering appearing after moving arguments to registers. The following two
; checks verify that the register allocator changes those sequences to direct
; moves to argument register where it can (for registers that are not used in
; byval lowering - not rsi, not rdi, not rcx).
; Expect argument 4 to be moved directly to register edx.
; RUN: llc < %s -march=x86-64 -tailcallopt | grep movl | grep {7} | grep edx
; CHECK: movl $7, %edx
; Expect argument 6 to be moved directly to register r8.
; RUN: llc < %s -march=x86-64 -tailcallopt | grep movl | grep {17} | grep r8
; CHECK: movl $17, %r8d
; Expect not call but jmp to @tailcallee.
; CHECK: jmp tailcallee
; Expect the trailer.
; CHECK: .size tailcaller
%struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
i64, i64, i64, i64, i64, i64, i64, i64,
@@ -25,5 +40,3 @@ entry:
%tmp4 = tail call fastcc i64 @tailcallee(%struct.s* %a byval, i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
ret i64 %tmp4
}