Commit 439cc2c5de

Specifically, if there are copy-like instructions in the loop header they are
moved into the loop close to their uses. This reduces the live intervals of
the values and can avoid register spills.

This is working towards a fix for http://llvm.org/PR22230.
Review: http://reviews.llvm.org/D7259

Next steps:
- Find a better cost model (which non-copy instructions should be sunk?)
- Make this dependent on register pressure

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@232262 91177308-0d34-0410-b5e6-96231b3b80d8
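As a rough illustration of the transformation the commit describes, here is a
minimal sketch in the same IR dialect showing the "after sinking" shape: a
copy-like instruction (a getelementptr) that the source program computed in
the loop header is instead materialized in the block that uses it, so its
result is no longer live across the whole loop. The names @sink_example,
%struct.S, and @use_ptr are hypothetical and are not part of the test below.

%struct.S = type { i32, i32 }

define void @sink_example(%struct.S* %s, i1 %c) {
entry:
  br label %loop

loop:
  ; Before sinking, a getelementptr like the one in %use would sit here in
  ; the header, keeping its result alive across every iteration.
  br i1 %c, label %use, label %loop

use:
  ; After sinking, the address is computed right next to its single use, so
  ; its live interval no longer spans the loop body and need not be spilled.
  %p = getelementptr inbounds %struct.S, %struct.S* %s, i64 0, i32 1
  tail call void @use_ptr(i32* %p)
  br label %loop
}

declare void @use_ptr(i32*)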
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefix=CHECK
; RUN: llc < %s -mtriple=x86_64-linux -sink-insts-to-avoid-spills | FileCheck %s -check-prefix=SINK

; Ensure that we sink copy-like instructions into loops to avoid register
; spills.

; CHECK: Spill
; SINK-NOT: Spill

%struct.A = type { i32, i32, i32, i32, i32, i32 }

define void @_Z1fPhP1A(i8* nocapture readonly %input, %struct.A* %a) {
  %1 = getelementptr inbounds %struct.A, %struct.A* %a, i64 0, i32 0
  %2 = getelementptr inbounds %struct.A, %struct.A* %a, i64 0, i32 1
  %3 = getelementptr inbounds %struct.A, %struct.A* %a, i64 0, i32 2
  %4 = getelementptr inbounds %struct.A, %struct.A* %a, i64 0, i32 3
  %5 = getelementptr inbounds %struct.A, %struct.A* %a, i64 0, i32 4
  %6 = getelementptr inbounds %struct.A, %struct.A* %a, i64 0, i32 5
  br label %.backedge

.backedge:
  %.0 = phi i8* [ %input, %0 ], [ %7, %.backedge.backedge ]
  %7 = getelementptr inbounds i8, i8* %.0, i64 1
  %8 = load i8, i8* %7, align 1
  switch i8 %8, label %.backedge.backedge [
    i8 0, label %9
    i8 10, label %10
    i8 20, label %11
    i8 30, label %12
    i8 40, label %13
    i8 50, label %14
  ]

; <label>:9
  tail call void @_Z6assignPj(i32* %1)
  br label %.backedge.backedge

; <label>:10
  tail call void @_Z6assignPj(i32* %2)
  br label %.backedge.backedge

.backedge.backedge:
  br label %.backedge

; <label>:11
  tail call void @_Z6assignPj(i32* %3)
  br label %.backedge.backedge

; <label>:12
  tail call void @_Z6assignPj(i32* %4)
  br label %.backedge.backedge

; <label>:13
  tail call void @_Z6assignPj(i32* %5)
  br label %.backedge.backedge

; <label>:14
  tail call void @_Z6assignPj(i32* %6)
  br label %.backedge.backedge
}

declare void @_Z6assignPj(i32*)