Teach the inline spiller to attempt folding a load instruction into its single
use before rematerializing the load.

This allows us to produce:

    addps	LCPI0_1(%rip), %xmm2

Instead of:

    movaps	LCPI0_1(%rip), %xmm3
    addps	%xmm3, %xmm2

This saves a register and an instruction. The standard spiller already knows
how to do this.
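
For illustration, a hypothetical C-level function (not from this commit; any
SSE intrinsics code with a vector constant behaves similarly) where the
constant is emitted into the constant pool and its load can fold into the
addps:

    #include <xmmintrin.h>

    /* Hypothetical example: the vector constant lands in the constant
     * pool (e.g. LCPI0_1), and the load of it can be folded directly
     * into the addps that uses it. */
    __m128 addConst(__m128 x) {
      return _mm_add_ps(x, _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f));
    }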

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@122133 91177308-0d34-0410-b5e6-96231b3b80d8
Jakob Stoklund Olesen 2010-12-18 03:04:14 +00:00
parent 87c6d25c71
commit 83d1ba5728
2 changed files with 27 additions and 5 deletions

lib/CodeGen/InlineSpiller.cpp

@@ -85,7 +85,8 @@ private:
   bool coalesceStackAccess(MachineInstr *MI);
   bool foldMemoryOperand(MachineBasicBlock::iterator MI,
-                         const SmallVectorImpl<unsigned> &Ops);
+                         const SmallVectorImpl<unsigned> &Ops,
+                         MachineInstr *LoadMI = 0);
   void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
   void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
 };
@@ -141,6 +142,14 @@ bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
     }
   }
 
+  // Before rematerializing into a register for a single instruction, try to
+  // fold a load into the instruction. That avoids allocating a new register.
+  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
+      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
+    edit_->markRematerialized(RM.ParentVNI);
+    return true;
+  }
+
   // Allocate a new register for the remat.
   LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
   NewLI.markNotSpillable();
@@ -243,9 +252,13 @@ bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
 }
 
 /// foldMemoryOperand - Try folding stack slot references in Ops into MI.
-/// Return true on success, and MI will be erased.
+/// @param MI     Instruction using or defining the current register.
+/// @param Ops    Operand indices from readsWritesVirtualRegister().
+/// @param LoadMI Load instruction to use instead of stack slot when non-null.
+/// @return       True on success, and MI will be erased.
 bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                      const SmallVectorImpl<unsigned> &Ops) {
+                                      const SmallVectorImpl<unsigned> &Ops,
+                                      MachineInstr *LoadMI) {
   // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
   // operands.
   SmallVector<unsigned, 8> FoldOps;
@@ -262,11 +275,14 @@ bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
     FoldOps.push_back(Idx);
   }
 
-  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
+  MachineInstr *FoldMI =
+      LoadMI ? tii_.foldMemoryOperand(MI, FoldOps, LoadMI)
+             : tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
   if (!FoldMI)
     return false;
   lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
-  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
+  if (!LoadMI)
+    vrm_.addSpillSlotUse(stackSlot_, FoldMI);
   MI->eraseFromParent();
   DEBUG(dbgs() << "\tfolded: " << *FoldMI);
   return true;
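
The hunk above hinges on the two TargetInstrInfo::foldMemoryOperand overloads:
one folds a stack-slot (frame index) reference, the other folds another
instruction's load. Condensed from the diff, with the control flow spelled
out (names as in InlineSpiller; a sketch, not a drop-in replacement):

    // Pick the overload: fold LoadMI itself when one was supplied,
    // otherwise fold a reference to the spill slot.
    MachineInstr *FoldMI =
        LoadMI ? tii_.foldMemoryOperand(MI, FoldOps, LoadMI)
               : tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
    if (!FoldMI)
      return false;  // target refused the fold; caller falls back to remat
    lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
    // Only record a spill-slot use when the folded operand actually
    // references the stack slot; a folded LoadMI never touches it.
    if (!LoadMI)
      vrm_.addSpillSlotUse(stackSlot_, FoldMI);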

lib/CodeGen/LiveRangeEdit.h

@@ -117,6 +117,12 @@ public:
                         const TargetInstrInfo&,
                         const TargetRegisterInfo&);
 
+  /// markRematerialized - explicitly mark a value as rematerialized after doing
+  /// it manually.
+  void markRematerialized(VNInfo *ParentVNI) {
+    rematted_.insert(ParentVNI);
+  }
+
   /// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
   bool didRematerialize(VNInfo *ParentVNI) const {
     return rematted_.count(ParentVNI);
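
How the two sides of this bookkeeping meet (a sketch, assuming the caller has
already performed the remat by folding, as InlineSpiller does above;
noteManualRemat is a hypothetical helper, not part of the patch):

    #include "LiveRangeEdit.h"  // LLVM-internal header in lib/CodeGen
    #include <cassert>

    // After folding a load by hand, record the remat so that later
    // queries against the LiveRangeEdit report the value as
    // rematerialized.
    static void noteManualRemat(llvm::LiveRangeEdit &edit,
                                llvm::VNInfo *parentVNI) {
      edit.markRematerialized(parentVNI);
      assert(edit.didRematerialize(parentVNI) && "remat not recorded");
    }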