We lower setb to sbb in the hope that the and will go away; when it doesn't, match it back to setb.

On a 64-bit version of the testcase, we used to get:

	movq	%rdi, %rax
	addq	%rsi, %rax
	sbbb	%dl, %dl
	andb	$1, %dl
	ret

Now we get:

	movq	%rdi, %rax
	addq	%rsi, %rax
	setb	%dl
	ret
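
For reference, a minimal IR sketch of the kind of function that exercises this path (a hypothetical 64-bit analogue of the test7 case added below; the name @overflow64 is illustrative, not part of this commit):

	declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

	; Hypothetical 64-bit analogue of test7: the i1 overflow result is the
	; carry of the addq, which is what the sbb/setb above materialize.
	define { i64, i1 } @overflow64(i64 %v1, i64 %v2) nounwind {
	  %t = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
	  ret { i64, i1 } %t
	}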




git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@122217 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Chris Lattner
Date:   2010-12-20 01:16:03 +00:00
Commit: 39ffcb7b62 (parent b085181258)

2 changed files with 15 additions and 0 deletions

@@ -207,6 +207,12 @@ def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
(SETB_C64r)>;
// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" on the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type. When
// this happens, it is great. However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
(SETBr)>;
//===----------------------------------------------------------------------===//
// String Pseudo Instructions
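
To illustrate the comment above, a sketch (assumed, not from this commit) of the case where the canonical sbb form pays off: the intent is that sign-extending the carry to a wider type can become a single wide sbb (the SETB_C64r pattern above), so the and is never needed. The function name @carry_sext is hypothetical.

	declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

	; Hypothetical example: the sext of the carry lets the sbb be widened and
	; the and folded away, which is when the (and (sbb)) canonical form wins.
	define i64 @carry_sext(i64 %v1, i64 %v2) nounwind {
	  %t = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
	  %ov = extractvalue { i64, i1 } %t, 1
	  %s = sext i1 %ov to i64
	  ret i64 %s
	}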

@@ -92,3 +92,12 @@ define i64 @test6(i64 %A, i32 %B) nounwind {
; X64: ret
}
define {i32, i1} @test7(i32 %v1, i32 %v2) nounwind {
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  ret {i32, i1} %t
}
; X64: test7:
; X64: addl %esi, %eax
; X64-NEXT: setb %dl
; X64-NEXT: ret