llvm-6502/test/CodeGen/X86/block-placement.ll
Chandler Carruth 3071363bcd Completely re-write the algorithm behind MachineBlockPlacement based on
discussions with Andy. Fundamentally, the previous algorithm is both
counterproductive on several fronts and focused on something which isn't
necessarily the most important: static branch prediction.

The new algorithm uses the existing loop CFG structure information to
walk through the CFG itself to lay out blocks. It coalesces adjacent
blocks within the loop where the CFG allows, based on the most likely
path taken. Finally, it topologically orders the block chains that have
been formed. This allows it to choose a (mostly) topologically valid
ordering which still prioritizes fallthrough within the structural
constraints.
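
To make the coalescing step concrete, here is a minimal, self-contained
C++ sketch of the greedy chain idea (block names, probabilities, and data
structures are invented for illustration; this is not the
MachineBlockPlacement implementation itself):

  // Each chain greedily follows the most probable unplaced successor, so
  // the hot path becomes a fallthrough run and cold blocks sink to the end.
  #include <cstdio>
  #include <string>
  #include <unordered_map>
  #include <unordered_set>
  #include <utility>
  #include <vector>

  int main() {
    // Successor edges with probabilities, modeled loosely on the if-chain
    // test below: the 'then' blocks are cold, the 'else' path is hot.
    std::unordered_map<std::string,
                       std::vector<std::pair<std::string, double>>>
        Succs = {{"entry", {{"then1", 0.06}, {"else1", 0.94}}},
                 {"then1", {{"else1", 1.0}}},
                 {"else1", {{"then2", 0.06}, {"exit", 0.94}}},
                 {"then2", {{"exit", 1.0}}}};
    std::vector<std::string> Order = {"entry", "then1", "else1", "then2",
                                      "exit"};

    std::unordered_set<std::string> Placed;
    std::vector<std::string> Layout;
    for (const std::string &Start : Order) {
      if (Placed.count(Start))
        continue;
      // Start a new chain and extend it along the most likely unplaced edge.
      std::string Cur = Start;
      while (true) {
        Layout.push_back(Cur);
        Placed.insert(Cur);
        std::string Best;
        double BestProb = 0.0;
        for (const auto &Edge : Succs[Cur])
          if (!Placed.count(Edge.first) && Edge.second > BestProb) {
            Best = Edge.first;
            BestProb = Edge.second;
          }
        if (Best.empty())
          break;
        Cur = Best;
      }
    }

    // Prints: entry else1 exit then1 then2 -- the hot path falls through
    // and the cold 'then' blocks are pushed to the end.
    for (const std::string &B : Layout)
      std::printf("%s ", B.c_str());
    std::printf("\n");
    return 0;
  }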

As a final twist, the algorithm does violate the CFG ordering when it
discovers a "hot" edge, that is, an edge that is more than 4x hotter than
the competing edges in the CFG. These are forcibly merged into
a fallthrough chain.
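
The 4x threshold can be sketched as a simple predicate (the weights and
the interface here are invented for illustration; the real pass works on
branch probabilities rather than this toy signature):

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // An edge is "hot" when it is more than 4x hotter than every competing
  // edge leaving the same block.
  bool isHotEdge(const std::vector<uint64_t> &SuccWeights, unsigned Idx) {
    for (unsigned I = 0; I < SuccWeights.size(); ++I)
      if (I != Idx && SuccWeights[Idx] <= 4 * SuccWeights[I])
        return false; // a competing edge is within 4x; not hot enough
    return true;
  }

  int main() {
    // 64 vs. 4 is 16x hotter, so that edge would be forced into a
    // fallthrough chain; 40 vs. 20 is only 2x hotter, so it would not.
    std::printf("%d %d\n", isHotEdge({64, 4}, 0), isHotEdge({40, 20}, 0));
    return 0;
  }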

Future transformations that need to be added are rotation of loop exit
conditions to be fallthrough, and better isolation of cold block chains.
I'm also planning on adding statistics to model how well the algorithm
does at laying out blocks based on the probabilities it receives.

The old tests mostly still pass, and I have some new tests to add, but
the nested loops are still behaving very strangely. This almost seems
to be working as intended, since it rotated the exit branch to be
fallthrough, but I'm not convinced this is actually the best layout. It
is well supported by the probabilities for loops we currently get, but
those are pretty broken for nested loops, so this may change later.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@142743 91177308-0d34-0410-b5e6-96231b3b80d8
2011-10-23 09:18:45 +00:00


; RUN: llc -march=x86 -enable-block-placement < %s | FileCheck %s
declare void @error(i32 %i, i32 %a, i32 %b)
define i32 @test_ifchains(i32 %i, i32* %a, i32 %b) {
; Test a chain of ifs, where the block guarded by the if is error handling code
; that is not expected to run.
; CHECK: test_ifchains:
; CHECK: %entry
; CHECK: %else1
; CHECK: %else2
; CHECK: %else3
; CHECK: %else4
; CHECK: %exit
; CHECK: %then1
; CHECK: %then2
; CHECK: %then3
; CHECK: %then4
; CHECK: %then5
entry:
%gep1 = getelementptr i32* %a, i32 1
%val1 = load i32* %gep1
%cond1 = icmp ugt i32 %val1, 1
br i1 %cond1, label %then1, label %else1, !prof !0
then1:
call void @error(i32 %i, i32 1, i32 %b)
br label %else1
else1:
%gep2 = getelementptr i32* %a, i32 2
%val2 = load i32* %gep2
%cond2 = icmp ugt i32 %val2, 2
br i1 %cond2, label %then2, label %else2, !prof !0
then2:
call void @error(i32 %i, i32 1, i32 %b)
br label %else2
else2:
%gep3 = getelementptr i32* %a, i32 3
%val3 = load i32* %gep3
%cond3 = icmp ugt i32 %val3, 3
br i1 %cond3, label %then3, label %else3, !prof !0
then3:
call void @error(i32 %i, i32 1, i32 %b)
br label %else3
else3:
%gep4 = getelementptr i32* %a, i32 4
%val4 = load i32* %gep4
%cond4 = icmp ugt i32 %val4, 4
br i1 %cond4, label %then4, label %else4, !prof !0
then4:
call void @error(i32 %i, i32 1, i32 %b)
br label %else4
else4:
%gep5 = getelementptr i32* %a, i32 3
%val5 = load i32* %gep5
%cond5 = icmp ugt i32 %val5, 3
br i1 %cond5, label %then5, label %exit, !prof !0
then5:
call void @error(i32 %i, i32 1, i32 %b)
br label %exit
exit:
ret i32 %b
}
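; The profile metadata below weights each 'then' destination at 4 against 64
; for the corresponding 'else'/'exit' destination, marking the error paths as cold.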
!0 = metadata !{metadata !"branch_weights", i32 4, i32 64}
define i32 @test_loop_align(i32 %i, i32* %a) {
; Check that we provide basic loop body alignment with the block placement
; pass.
; CHECK: test_loop_align:
; CHECK: %entry
; CHECK: .align [[ALIGN:[0-9]+]],
; CHECK-NEXT: %body
; CHECK: %exit
entry:
br label %body
body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
%arrayidx = getelementptr inbounds i32* %a, i32 %iv
%0 = load i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
br i1 %exitcond, label %exit, label %body
exit:
ret i32 %sum
}
define i32 @test_nested_loop_align(i32 %i, i32* %a, i32* %b) {
; Check that we provide nested loop body alignment.
; CHECK: test_nested_loop_align:
; CHECK: %entry
; CHECK: .align [[ALIGN]],
; CHECK-NEXT: %loop.body.2
; CHECK: .align [[ALIGN]],
; CHECK-NEXT: %inner.loop.body
; CHECK-NOT: .align
; CHECK: %exit
entry:
br label %loop.body.1
loop.body.1:
%iv = phi i32 [ 0, %entry ], [ %next, %loop.body.2 ]
%arrayidx = getelementptr inbounds i32* %a, i32 %iv
%bidx = load i32* %arrayidx
br label %inner.loop.body
inner.loop.body:
%inner.iv = phi i32 [ 0, %loop.body.1 ], [ %inner.next, %inner.loop.body ]
%base = phi i32 [ 0, %loop.body.1 ], [ %sum, %inner.loop.body ]
%scaled_idx = mul i32 %bidx, %iv
%inner.arrayidx = getelementptr inbounds i32* %b, i32 %scaled_idx
%0 = load i32* %inner.arrayidx
%sum = add nsw i32 %0, %base
%inner.next = add i32 %iv, 1
%inner.exitcond = icmp eq i32 %inner.next, %i
br i1 %inner.exitcond, label %loop.body.2, label %inner.loop.body
loop.body.2:
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
br i1 %exitcond, label %exit, label %loop.body.1
exit:
ret i32 %sum
}