llvm-6502/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
Commit 9e67db4af1 by Chandler Carruth: Flip the new block-placement pass to be on by default.
This is mostly to test the waters. I'd like to get results from FNT
build bots and other bots running on non-x86 platforms.

I have tested this feature pretty heavily over the last few months. It fixes
several of the execution-time regressions caused by the inlining work, by
preventing inlining decisions from radically impacting block layout.
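
For context, here is a minimal sketch (mine, not part of this commit; the
function, labels, and weights are made up) of the decision the pass makes:
given branch-weight metadata, it places the hot successor fallthrough-adjacent
to its predecessor and sinks the cold block out of line, instead of simply
following source order.

  define i32 @example(i32 %x) {
  entry:
    %cmp = icmp eq i32 %x, 0
    br i1 %cmp, label %cold, label %hot, !prof !0
  cold:                ; weight 1: laid out out of line
    ret i32 1
  hot:                 ; weight 1000: placed right after entry
    ret i32 0
  }
  !0 = metadata !{metadata !"branch_weights", i32 1, i32 1000}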

I've seen very large improvements in the yacr2 and ackermann benchmarks, along
with the expected noise across the rest of the benchmark suite that accompanies
any code-layout change. I've analyzed all of the regressions and either fixed
them or found them to be impossible to fix. See my email to llvmdev for more
details.

I'd like this to be in 3.1, as it complements the inliner changes; if any
failures show up or anyone has concerns, it is just a flag flip and can easily
be turned off.

I'm switching it on tonight to try to get at least one run through various
folks' performance suites, in case SPEC or something else has serious issues
with it. I'll watch the bots and revert if anything shows up.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@154816 91177308-0d34-0410-b5e6-96231b3b80d8
2012-04-16 13:49:17 +00:00


; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
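
; This file exercises Thumb-2 if-conversion: short conditional blocks should
; be predicated inside IT blocks (it/itt plus conditional instructions such
; as cmpne, moveq, and pophi) rather than lowered as branches around them.
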
define void @foo(i32 %X, i32 %Y) {
entry:
; CHECK: foo:
; CHECK: it ne
; CHECK: cmpne
; CHECK: it hi
; CHECK: pophi {r7, pc}
%tmp1 = icmp ult i32 %X, 4 ; <i1> [#uses=1]
%tmp4 = icmp eq i32 %Y, 0 ; <i1> [#uses=1]
%tmp7 = or i1 %tmp4, %tmp1 ; <i1> [#uses=1]
br i1 %tmp7, label %cond_true, label %UnifiedReturnBlock
cond_true: ; preds = %entry
%tmp10 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
ret void
UnifiedReturnBlock: ; preds = %entry
ret void
}
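
; The 'or' of the two i1 tests above is lowered as a chained conditional
; compare: the second cmp is itself predicated (it ne; cmpne) so it only
; executes when the first test was not decisive, and the return down the
; not-taken path is the predicated pophi {r7, pc}.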

declare i32 @bar(...)

; FIXME: Need post-ifcvt branch folding to get rid of the extra br at end of BB1.

%struct.quad_struct = type { i32, i32, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct* }

define fastcc i32 @CountTree(%struct.quad_struct* %tree) {
entry:
; CHECK: CountTree:
; CHECK: itt eq
; CHECK: moveq
; CHECK: popeq
; CHECK: bne
; CHECK: cmp
; CHECK: it eq
; CHECK: cmpeq
br label %tailrecurse
tailrecurse: ; preds = %bb, %entry
%tmp6 = load %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
%tmp9 = load %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=2]
%tmp12 = load %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
%tmp14 = icmp eq %struct.quad_struct* null, null ; <i1> [#uses=1]
%tmp17 = icmp eq %struct.quad_struct* %tmp6, null ; <i1> [#uses=1]
%tmp23 = icmp eq %struct.quad_struct* %tmp9, null ; <i1> [#uses=1]
%tmp29 = icmp eq %struct.quad_struct* %tmp12, null ; <i1> [#uses=1]
%bothcond = and i1 %tmp17, %tmp14 ; <i1> [#uses=1]
%bothcond1 = and i1 %bothcond, %tmp23 ; <i1> [#uses=1]
%bothcond2 = and i1 %bothcond1, %tmp29 ; <i1> [#uses=1]
br i1 %bothcond2, label %return, label %bb
bb: ; preds = %tailrecurse
%tmp41 = tail call fastcc i32 @CountTree( %struct.quad_struct* %tmp9 ) ; <i32> [#uses=0]
br label %tailrecurse
return: ; preds = %tailrecurse
ret i32 0
}
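
; Here the early-exit path folds into a single two-instruction IT block:
; itt eq predicates both the moveq that materializes the return value and
; the popeq return, while the remaining null checks are chained with
; it eq; cmpeq as in @foo above.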

%struct.SString = type { i8*, i32, i32 }

declare void @abort()

define fastcc void @t1(%struct.SString* %word, i8 signext %c) {
entry:
; CHECK: t1:
; CHECK: it ne
; CHECK: popne {r7, pc}
%tmp1 = icmp eq %struct.SString* %word, null ; <i1> [#uses=1]
br i1 %tmp1, label %cond_true, label %cond_false
cond_true: ; preds = %entry
tail call void @abort( )
unreachable
cond_false: ; preds = %entry
ret void
}
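
; The entire non-null path reduces to one predicated return: when %word is
; not null, the function comes back via popne {r7, pc} with no branch at all.
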
define fastcc void @t2() nounwind {
entry:
; CHECK: t2:
; CHECK: cmp r0, #0
; CHECK: bne
br i1 undef, label %bb.i.i3, label %growMapping.exit
bb.i.i3: ; preds = %entry
unreachable
growMapping.exit: ; preds = %entry
unreachable
}
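
; To reproduce outside of lit (a sketch; it assumes llc, FileCheck, and
; llvm-lit from an LLVM build are on PATH, and that the path below matches
; your checkout):
;   llc < test/CodeGen/Thumb2/thumb2-ifcvt2.ll -mtriple=thumbv7-apple-ios \
;     | FileCheck test/CodeGen/Thumb2/thumb2-ifcvt2.ll
; or through the test runner:
;   llvm-lit test/CodeGen/Thumb2/thumb2-ifcvt2.ll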