; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
; PR5757

%0 = type { i64, i32 }
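;; PR5757: select between two loaded aggregates. Per the checks below, codegen
;; should cmov the two addresses and then do a single load.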
define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
  %t0 = load %0* %p
  %t1 = load %0* %q
  %t4 = select i1 %r, %0 %t0, %0 %t1
  %t5 = extractvalue %0 %t4, 1
  ret i32 %t5
; CHECK: test1:
; CHECK: cmovneq %rdi, %rsi
; CHECK: movl (%rsi), %eax
}
; PR2139
define i32 @test2() nounwind {
entry:
  %tmp73 = tail call i1 @return_false()  ; <i1> [#uses=1]
  %g.0 = select i1 %tmp73, i16 0, i16 -480  ; <i16> [#uses=1]
  %tmp7778 = sext i16 %g.0 to i32  ; <i32> [#uses=1]
  %tmp80 = shl i32 %tmp7778, 3  ; <i32> [#uses=1]
  %tmp87 = icmp sgt i32 %tmp80, 32767  ; <i1> [#uses=1]
  br i1 %tmp87, label %bb90, label %bb91

bb90:  ; preds = %entry
  unreachable

bb91:  ; preds = %entry
  ret i32 0
; CHECK: test2:
; CHECK: movnew
; CHECK: movswl
}

declare i1 @return_false()

;; Select between two floating point constants.
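;; This should compile to a branchless load from a two-entry constant pool,
;; indexed by the condition, as the scaled-index movss check below expects.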
define float @test3(i32 %x) nounwind readnone {
entry:
  %0 = icmp eq i32 %x, 0  ; <i1> [#uses=1]
  %iftmp.0.0 = select i1 %0, float 4.200000e+01, float 2.300000e+01  ; <float> [#uses=1]
  ret float %iftmp.0.0
; CHECK: test3:
; CHECK: movss {{.*}},4), %xmm0
}
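;; The selected offset (4 or 0) should fold into the load's scaled addressing
;; mode, per the movsbl check below.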
define signext i8 @test4(i8* nocapture %P, double %F) nounwind readonly {
entry:
  %0 = fcmp olt double %F, 4.200000e+01  ; <i1> [#uses=1]
  %iftmp.0.0 = select i1 %0, i32 4, i32 0  ; <i32> [#uses=1]
  %1 = getelementptr i8* %P, i32 %iftmp.0.0  ; <i8*> [#uses=1]
  %2 = load i8* %1, align 1  ; <i8> [#uses=1]
  ret i8 %2
; CHECK: test4:
; CHECK: movsbl ({{.*}},4), %eax
}
define void @test5(i1 %c, <2 x i16> %a, <2 x i16> %b, <2 x i16>* %p) nounwind {
  %x = select i1 %c, <2 x i16> %a, <2 x i16> %b
  store <2 x i16> %x, <2 x i16>* %p
  ret void
; CHECK: test5:
}
define void @test6(i32 %C, <4 x float>* %A, <4 x float>* %B) nounwind {
  %tmp = load <4 x float>* %A  ; <<4 x float>> [#uses=1]
  %tmp3 = load <4 x float>* %B  ; <<4 x float>> [#uses=2]
  %tmp9 = fmul <4 x float> %tmp3, %tmp3  ; <<4 x float>> [#uses=1]
  %tmp.upgrd.1 = icmp eq i32 %C, 0  ; <i1> [#uses=1]
  %iftmp.38.0 = select i1 %tmp.upgrd.1, <4 x float> %tmp9, <4 x float> %tmp  ; <<4 x float>> [#uses=1]
  store <4 x float> %iftmp.38.0, <4 x float>* %A
  ret void
; Verify that the fmul gets sunk into the one part of the diamond where it is
; needed.
; CHECK: test6:
; CHECK: je
; CHECK: ret
; CHECK: mulps
; CHECK: ret
}
; Select between fp80s.
define x86_fp80 @test7(i32 %tmp8) nounwind {
  %tmp9 = icmp sgt i32 %tmp8, -1  ; <i1> [#uses=1]
  %retval = select i1 %tmp9, x86_fp80 0xK4005B400000000000000, x86_fp80 0xK40078700000000000000
  ret x86_fp80 %retval
; CHECK: test7:
; CHECK: leaq
; CHECK: fldt (%r{{.}}x,%r{{.}}x)
}
; Widening of a v6i32 select, followed by a sub.
define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1, <6 x i32> %src2) nounwind {
  %x = select i1 %c, <6 x i32> %src1, <6 x i32> %src2
  %val = sub <6 x i32> %x, < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
  store <6 x i32> %val, <6 x i32>* %dst.addr
  ret void
; CHECK: test8:
}
;; Test integer select between values and constants.
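;; select (x == 0), -1, y should become (sign_bit (x - 1)) | y, i.e.
;; cmp $1 / sbb / or, rather than a cmov: cmov is often high latency and
;; cannot fold immediates or memory operands.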
define i64 @test9(i64 %x, i64 %y) nounwind readnone ssp noredzone {
  %cmp = icmp ne i64 %x, 0
  %cond = select i1 %cmp, i64 %y, i64 -1
  ret i64 %cond
; CHECK: test9:
; CHECK: cmpq $1, %rdi
; CHECK: sbbq %rax, %rax
; CHECK: orq %rsi, %rax
; CHECK: ret
}
;; Same as test9
define i64 @test9a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
  %cmp = icmp eq i64 %x, 0
  %cond = select i1 %cmp, i64 -1, i64 %y
  ret i64 %cond
; CHECK: test9a:
; CHECK: cmpq $1, %rdi
; CHECK: sbbq %rax, %rax
; CHECK: orq %rsi, %rax
; CHECK: ret
}
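;; Same as test9, but with the select already written as an explicit sext/or.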
define i64 @test9b(i64 %x, i64 %y) nounwind readnone ssp noredzone {
  %cmp = icmp eq i64 %x, 0
  %A = sext i1 %cmp to i64
  %cond = or i64 %y, %A
  ret i64 %cond
; CHECK: test9b:
; CHECK: cmpq $1, %rdi
; CHECK: sbbq %rax, %rax
; CHECK: orq %rsi, %rax
; CHECK: ret
}
;; Select between -1 and 1.
define i64 @test10(i64 %x, i64 %y) nounwind readnone ssp noredzone {
  %cmp = icmp eq i64 %x, 0
  %cond = select i1 %cmp, i64 -1, i64 1
  ret i64 %cond
; CHECK: test10:
; CHECK: cmpq $1, %rdi
; CHECK: sbbq %rax, %rax
; CHECK: orq $1, %rax
; CHECK: ret
}
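;; The same pattern with -1 on the other side of the select; a NOT is
;; inserted to compensate.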
define i64 @test11(i64 %x, i64 %y) nounwind readnone ssp noredzone {
  %cmp = icmp eq i64 %x, 0
  %cond = select i1 %cmp, i64 %y, i64 -1
  ret i64 %cond
; CHECK: test11:
; CHECK: cmpq $1, %rdi
; CHECK: sbbq %rax, %rax
; CHECK: notq %rax
; CHECK: orq %rsi, %rax
; CHECK: ret
}
define i64 @test11a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
  %cmp = icmp ne i64 %x, 0
  %cond = select i1 %cmp, i64 -1, i64 %y
  ret i64 %cond
; CHECK: test11a:
; CHECK: cmpq $1, %rdi
; CHECK: sbbq %rax, %rax
; CHECK: notq %rax
; CHECK: orq %rsi, %rax
; CHECK: ret
}
declare noalias i8* @_Znam(i64) noredzone
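;; The flags result of the umul.with.overflow should feed the cmov directly
;; (cmovnoq), with no intermediate seto/testb.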
define noalias i8* @test12(i64 %count) nounwind ssp noredzone {
entry:
  %A = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %count, i64 4)
  %B = extractvalue { i64, i1 } %A, 1
  %C = extractvalue { i64, i1 } %A, 0
  %D = select i1 %B, i64 -1, i64 %C
  %call = tail call noalias i8* @_Znam(i64 %D) nounwind noredzone
  ret i8* %call
; CHECK: test12:
; CHECK: mulq
; CHECK: movq $-1, %rdi
; CHECK: cmovnoq %rax, %rdi
; CHECK: jmp __Znam
}
declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
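;; -(a < b) should lower directly to a setcc_carry (cmp/sbb), not a cmov.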
define i32 @test13(i32 %a, i32 %b) nounwind {
  %c = icmp ult i32 %a, %b
  %d = sext i1 %c to i32
  ret i32 %d
; CHECK: test13:
; CHECK: cmpl
; CHECK-NEXT: sbbl
; CHECK-NEXT: ret
}
define i32 @test14(i32 %a, i32 %b) nounwind {
  %c = icmp uge i32 %a, %b
  %d = sext i1 %c to i32
  ret i32 %d
; CHECK: test14:
; CHECK: cmpl
; CHECK-NEXT: sbbl
; CHECK-NEXT: notl
; CHECK-NEXT: ret
}