Simplify operand folding when rematerializing a load.
We already know how to fold a reload from a frameindex without analyzing the
load instruction. Generalize this to handle any frameindex load. This
streamlines the logic for rematerializing loads from stack arguments. As a
side effect, it allows stackmaps to record a stack argument location without
spilling it.

Verified no effect on codegen for the llvm test-suite.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194497 91177308-0d34-0410-b5e6-96231b3b80d8
parent 2f08e75a45
commit 0085d5e5ae
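The heart of the change is an early-out at the top of the LoadMI overload of
X86InstrInfo::foldMemoryOperandImpl: if the load is recognized as a plain load
from a stack slot, delegate to the existing frame-index overload instead of
pattern-matching the load instruction further down. Below is a minimal toy
model of that dispatch (self-contained C++ with stand-in types, not the LLVM
API; the real diff follows):

    #include <iostream>

    // Stand-in for MachineInstr: just enough state for the sketch.
    struct MachineInstr {
      bool IsStackSlotLoad; // models isLoadFromStackSlot() succeeding
      int FI;               // the frame index it loads from, if any
    };

    // Models TargetInstrInfo::isLoadFromStackSlot: returns true and sets
    // FrameIndex when MI is a plain load from a stack slot.
    static bool isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) {
      if (!MI->IsStackSlotLoad)
        return false;
      FrameIndex = MI->FI;
      return true;
    }

    // Frame-index overload: fold the operand directly against a stack slot.
    static bool foldMemoryOperandImpl(MachineInstr * /*MI*/, int FrameIndex) {
      std::cout << "fold directly from frame index " << FrameIndex << "\n";
      return true;
    }

    // LoadMI overload: with this patch, any frame-index load takes the fast
    // path above; only "normal" loads fall through to operand copying.
    static bool foldMemoryOperandImpl(MachineInstr *MI, MachineInstr *LoadMI) {
      int FrameIndex;
      if (isLoadFromStackSlot(LoadMI, FrameIndex))
        return foldMemoryOperandImpl(MI, FrameIndex);
      std::cout << "fold a normal load: copy its address operands\n";
      return true;
    }

    int main() {
      MachineInstr Reload = {/*IsStackSlotLoad=*/true, /*FI=*/2};
      MachineInstr Use = {false, -1};
      foldMemoryOperandImpl(&Use, &Reload); // takes the frame-index fast path
    }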
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -4292,6 +4292,12 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                   MachineInstr *MI,
                                                   const SmallVectorImpl<unsigned> &Ops,
                                                   MachineInstr *LoadMI) const {
+  // If loading from a FrameIndex, fold directly from the FrameIndex.
+  unsigned NumOps = LoadMI->getDesc().getNumOperands();
+  int FrameIndex;
+  if (isLoadFromStackSlot(LoadMI, FrameIndex))
+    return foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
+
   // Check switch flag
   if (NoFusing) return NULL;
 
@@ -4417,7 +4423,6 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
       return NULL;
 
     // Folding a normal load. Just copy the load's address operands.
-    unsigned NumOps = LoadMI->getDesc().getNumOperands();
    for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
      MOs.push_back(LoadMI->getOperand(i));
    break;
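For context on the second hunk: an x86 memory reference always occupies a
fixed-length tail of operands (base register, scale, index register,
displacement, segment), whose count is X86::AddrNumOperands, so folding a
normal load just copies that tail from LoadMI. A small self-contained
illustration of the same loop shape (plain C++ with hypothetical operand
strings, not the MachineOperand API):

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      // Hypothetical operand list for a load like "mov r10, [rbp + 8*rcx - 16]":
      // one def followed by the five-operand x86 address tail.
      std::vector<std::string> Operands = {
          "r10",                             // destination register
          "rbp", "8", "rcx", "-16", "%noreg" // base, scale, index, disp, segment
      };
      const unsigned AddrNumOperands = 5; // fixed tail length (X86::AddrNumOperands)
      const unsigned NumOps = Operands.size();
      // Same loop shape as the diff: copy the load's address operands.
      for (unsigned i = NumOps - AddrNumOperands; i != NumOps; ++i)
        std::printf("copy address operand: %s\n", Operands[i].c_str());
    }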
--- a/test/CodeGen/X86/stackmap.ll
+++ b/test/CodeGen/X86/stackmap.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -disable-fp-elim | FileCheck %s
 ;
 ; Note: Print verbose stackmaps using -debug-only=stackmaps.
 
@@ -182,21 +182,16 @@ entry:
 ; CHECK: .long 11
 ; CHECK-NEXT: .long L{{.*}}-_spilledValue
 ; CHECK-NEXT: .short 0
-; CHECK-NEXT: .short 10
+; CHECK-NEXT: .short 17
 ;
-; Check that at least one is a spilled entry (Indirect).
+; Check that at least one is a spilled entry from RBP.
+; Location: Indirect RBP + ...
 ; CHECK: .byte 3
+; CHECK: .byte 0
+; CHECK: .short 6
 define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16) {
 entry:
-  %resolveCall = inttoptr i64 -559038737 to i8*
-  call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 11, i32 15, i8* %resolveCall, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9)
-
-; FIXME: The Spiller needs to be able to fold all rematted loads! This
-; can be seen by adding %l15 to the stackmap.
-; <rdar://15202984> [JS] Ran out of registers during register allocation
-; %resolveCall = inttoptr i64 -559038737 to i8*
-; call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 12, i32 15, i8* %resolveCall, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
-
+  call void (i32, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i32 11, i32 15, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16)
   ret void
 }
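To read the new CHECK lines: in the stack map format emitted at this time,
each location record starts with a one-byte location type (1 = Register,
2 = Direct, 3 = Indirect, 4 = Constant, 5 = ConstantIndex), followed by a
reserved byte, a two-byte DWARF register number, and a four-byte offset. So
".byte 3" / ".byte 0" / ".short 6" match an Indirect entry based on DWARF
register 6, which is RBP on x86-64: a stack argument addressed as
[RBP + offset] without being re-spilled. A minimal decoding sketch under that
assumed layout (self-contained C++, not an LLVM API):

    #include <cstdint>
    #include <cstdio>

    // One stack map location record, assuming the layout described above.
    struct StackMapLocation {
      uint8_t Type;      // .byte 3  -> 3 = Indirect: value lives in memory
      uint8_t Reserved;  // .byte 0
      uint16_t DwarfReg; // .short 6 -> DWARF register 6 is RBP on x86-64
      int32_t Offset;    // byte offset added to the register
    };

    int main() {
      // Hypothetical record for a stack argument recorded by the patchpoint.
      StackMapLocation Loc = {3, 0, 6, 16};
      if (Loc.Type == 3) // Indirect: the consumer must load [reg + offset]
        std::printf("value at [dwarf-reg %u %+d]\n", Loc.DwarfReg, Loc.Offset);
    }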