mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-12-21 16:31:16 +00:00
6a7770b7ae
This changes the SelectionDAG scheduling preference to source order. Soon, the SelectionDAG scheduler can be bypassed, saving a nice chunk of compile time.

Performance differences that result from this change are often a consequence of register coalescing. The register coalescer is far from perfect; bugs can be filed for deficiencies.

On x86 SandyBridge/Haswell, the source-order schedule is often preserved, particularly for small blocks. Register pressure is generally improved over the SD scheduler's ILP mode. However, we are still able to handle large blocks that require latency hiding, unlike the SD scheduler's BURR mode. The MI scheduler also attempts to discover the critical path in single-block loops and adjust heuristics accordingly.

The MI scheduler relies on the new machine model. This is currently unimplemented for AVX, so we may not be generating the best code yet.

Unit tests are updated so they don't depend on SD scheduling heuristics.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@192750 91177308-0d34-0410-b5e6-96231b3b80d8
78 lines
2.3 KiB
LLVM
; RUN: llc < %s -tailcallopt -code-model=medium -stack-alignment=4 -mtriple=i686-linux-gnu -mcpu=pentium | FileCheck %s
; Check the HiPE calling convention works (x86-32)
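;
; HiPE is Erlang's native-code compiler; "cc 11" is LLVM's number for its
; calling convention. The first two parameters of a HiPE function (%hp and %p)
; are the Erlang heap and process pointers; judging from the CHECK lines
; below, on x86-32 they live in %esi and %ebp, and the remaining arguments are
; passed in %eax, %edx and %ecx.
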
define void @zap(i32 %a, i32 %b) nounwind {
entry:
  ; CHECK: movl 40(%esp), %eax
  ; CHECK-NEXT: movl 44(%esp), %edx
  ; CHECK-NEXT: movl $8, %ecx
  ; CHECK-NEXT: calll addfour
  %0 = call cc 11 {i32, i32, i32} @addfour(i32 undef, i32 undef, i32 %a, i32 %b, i32 8)
  %res = extractvalue {i32, i32, i32} %0, 2

  ; CHECK: movl %eax, 16(%esp)
  ; CHECK-NEXT: movl $2, 12(%esp)
  ; CHECK-NEXT: movl $1, 8(%esp)
  ; CHECK: calll foo
  tail call void @foo(i32 undef, i32 undef, i32 1, i32 2, i32 %res) nounwind
  ret void
}

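; A HiPE callee: per the CHECK lines in @zap above, the non-fixed arguments
; %x, %y and %z arrive in %eax, %edx and %ecx, and the third field of the
; returned struct comes back in %eax.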
define cc 11 {i32, i32, i32} @addfour(i32 %hp, i32 %p, i32 %x, i32 %y, i32 %z) nounwind {
entry:
  ; CHECK: addl %edx, %eax
  ; CHECK-NEXT: addl %ecx, %eax
  %0 = add i32 %x, %y
  %1 = add i32 %0, %z

  ; CHECK: ret
  %res = insertvalue {i32, i32, i32} undef, i32 %1, 2
  ret {i32, i32, i32} %res
}

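; @foo is itself a HiPE function: the CHECK lines expect its five parameters
; to arrive in %esi, %ebp, %eax, %edx and %ecx, be spilled to and reloaded
; from the stack, and then be forwarded to @bar as a sibling tail call (a
; plain jmp rather than a call).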
define cc 11 void @foo(i32 %hp, i32 %p, i32 %arg0, i32 %arg1, i32 %arg2) nounwind {
entry:
  ; CHECK: movl %esi, 16(%esp)
  ; CHECK-NEXT: movl %ebp, 12(%esp)
  ; CHECK-NEXT: movl %eax, 8(%esp)
  ; CHECK-NEXT: movl %edx, 4(%esp)
  ; CHECK-NEXT: movl %ecx, (%esp)
  %hp_var = alloca i32
  %p_var = alloca i32
  %arg0_var = alloca i32
  %arg1_var = alloca i32
  %arg2_var = alloca i32
  store i32 %hp, i32* %hp_var
  store i32 %p, i32* %p_var
  store i32 %arg0, i32* %arg0_var
  store i32 %arg1, i32* %arg1_var
  store i32 %arg2, i32* %arg2_var

  ; CHECK: movl 16(%esp), %esi
  ; CHECK-NEXT: movl 12(%esp), %ebp
  ; CHECK-NEXT: movl 8(%esp), %eax
  ; CHECK-NEXT: movl 4(%esp), %edx
  %0 = load i32* %hp_var
  %1 = load i32* %p_var
  %2 = load i32* %arg0_var
  %3 = load i32* %arg1_var
  %4 = load i32* %arg2_var
  ; CHECK: jmp bar
  tail call cc 11 void @bar(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4) nounwind
  ret void
}

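; An indirect HiPE tail call through the @clos global; the CHECK lines expect
; it to be lowered to a memory-indirect jump (jmpl *clos) with the third
; argument materialized in %eax.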
define cc 11 void @baz() nounwind {
  %tmp_clos = load i32* @clos
  %tmp_clos2 = inttoptr i32 %tmp_clos to i32*
  %indirect_call = bitcast i32* %tmp_clos2 to void (i32, i32, i32)*
  ; CHECK: movl $42, %eax
  ; CHECK-NEXT: jmpl *clos
  tail call cc 11 void %indirect_call(i32 undef, i32 undef, i32 42) nounwind
  ret void
}

@clos = external constant i32
declare cc 11 void @bar(i32, i32, i32, i32, i32)