Enable re-materialization of instructions that have virtual register operands,
provided the definition of each such operand also reaches all of the
instruction's uses (so the re-materialized copy sees the same value).


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@47475 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Evan Cheng 2008-02-22 09:24:50 +00:00
parent d2b1fb27df
commit d70dbb5d62
3 changed files with 212 additions and 48 deletions

View File

@ -56,6 +56,7 @@ namespace llvm {
class LiveIntervals : public MachineFunctionPass { class LiveIntervals : public MachineFunctionPass {
MachineFunction* mf_; MachineFunction* mf_;
MachineRegisterInfo* mri_;
const TargetMachine* tm_; const TargetMachine* tm_;
const TargetRegisterInfo* tri_; const TargetRegisterInfo* tri_;
const TargetInstrInfo* tii_; const TargetInstrInfo* tii_;
@ -317,6 +318,18 @@ namespace llvm {
unsigned MIIdx, unsigned MIIdx,
LiveInterval &interval, bool isAlias = false); LiveInterval &interval, bool isAlias = false);
/// getReMatImplicitUse - If the remat definition MI has one (for now, we
/// only allow one) virtual register operand, then its uses are implicitly
/// using the register. Returns the virtual register.
unsigned getReMatImplicitUse(const LiveInterval &li,
MachineInstr *MI) const;
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use
/// index.
bool isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
unsigned UseIdx) const;
/// isReMaterializable - Returns true if the definition MI of the specified /// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable. Also returns true /// val# of the specified interval is re-materializable. Also returns true
/// by reference if the def is a load. /// by reference if the def is a load.
@ -332,10 +345,11 @@ namespace llvm {
SmallVector<unsigned, 2> &Ops, SmallVector<unsigned, 2> &Ops,
bool isSS, int Slot, unsigned Reg); bool isSS, int Slot, unsigned Reg);
/// canFoldMemoryOperand - Returns true if the specified load / store /// canFoldMemoryOperand - Return true if the specified load / store
/// folding is possible. /// folding is possible.
bool canFoldMemoryOperand(MachineInstr *MI, bool canFoldMemoryOperand(MachineInstr *MI,
SmallVector<unsigned, 2> &Ops) const; SmallVector<unsigned, 2> &Ops) const;
bool canFoldMemoryOperand(MachineInstr *MI, unsigned Reg) const;
/// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified
/// VNInfo that's after the specified index but is within the basic block. /// VNInfo that's after the specified index but is within the basic block.
@ -361,26 +375,28 @@ namespace llvm {
BitVector &RestoreMBBs, BitVector &RestoreMBBs,
std::map<unsigned,std::vector<SRInfo> >&RestoreIdxes); std::map<unsigned,std::vector<SRInfo> >&RestoreIdxes);
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void rewriteImplicitOps(const LiveInterval &li,
MachineInstr *MI, unsigned NewVReg, VirtRegMap &vrm);
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper
/// functions for addIntervalsForSpills to rewrite uses / defs for the given /// functions for addIntervalsForSpills to rewrite uses / defs for the given
/// live range. /// live range.
bool rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit, bool rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
unsigned id, unsigned index, unsigned end, MachineInstr *MI, bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot, MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
VirtRegMap &vrm, MachineRegisterInfo &RegMap, VirtRegMap &vrm, const TargetRegisterClass* rc,
const TargetRegisterClass* rc, SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
SmallVector<int, 4> &ReMatIds,
unsigned &NewVReg, bool &HasDef, bool &HasUse, unsigned &NewVReg, bool &HasDef, bool &HasUse,
const MachineLoopInfo *loopInfo,
std::map<unsigned,unsigned> &MBBVRegsMap, std::map<unsigned,unsigned> &MBBVRegsMap,
std::vector<LiveInterval*> &NewLIs); std::vector<LiveInterval*> &NewLIs);
void rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit, void rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
LiveInterval::Ranges::const_iterator &I, LiveInterval::Ranges::const_iterator &I,
MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot, MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
VirtRegMap &vrm, MachineRegisterInfo &RegMap, VirtRegMap &vrm, const TargetRegisterClass* rc,
const TargetRegisterClass* rc,
SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo, SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
BitVector &SpillMBBs, BitVector &SpillMBBs,
std::map<unsigned,std::vector<SRInfo> > &SpillIdxes, std::map<unsigned,std::vector<SRInfo> > &SpillIdxes,

View File

@ -83,6 +83,7 @@ void LiveIntervals::releaseMemory() {
/// ///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
mf_ = &fn; mf_ = &fn;
mri_ = &mf_->getRegInfo();
tm_ = &fn.getTarget(); tm_ = &fn.getTarget();
tri_ = tm_->getRegisterInfo(); tri_ = tm_->getRegisterInfo();
tii_ = tm_->getInstrInfo(); tii_ = tm_->getInstrInfo();
@ -598,6 +599,38 @@ unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// Register allocator hooks. // Register allocator hooks.
// //
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register, or 0 if MI has no qualifying
/// register use operand.
///
/// Operands that are not register uses, undefined (reg 0), or refer to
/// li.reg itself are ignored.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    // Keep scanning instead of breaking out after the first match so this
    // assert can actually catch a second register operand; previously an
    // early break made it unreachable.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    // First qualifying operand wins (matches the original behavior in
    // builds where the assert is compiled out).
    if (!RegOp)
      RegOp = Reg;
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
///
/// Index of MI is looked up via the instruction-index maps; UseIdx is an
/// already-computed instruction index.
/// NOTE(review): FindLiveRangeContaining(Index) is dereferenced unchecked —
/// this assumes li is live at MI's index (callers pass a def/use of li.reg);
/// if that invariant is violated this dereferences a past-the-end iterator.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
unsigned UseIdx) const {
unsigned Index = getInstructionIndex(MI);
// Value number live at MI.
VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
// The same value number must also cover the use index.
LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified /// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable. /// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool LiveIntervals::isReMaterializable(const LiveInterval &li,
@ -608,8 +641,25 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
isLoad = false; isLoad = false;
const TargetInstrDesc &TID = MI->getDesc(); const TargetInstrDesc &TID = MI->getDesc();
if (TID.isImplicitDef() || tii_->isTriviallyReMaterializable(MI)) { if (TID.isImplicitDef())
return true;
if (tii_->isTriviallyReMaterializable(MI)) {
isLoad = TID.isSimpleLoad(); isLoad = TID.isSimpleLoad();
unsigned ImpUse = getReMatImplicitUse(li, MI);
if (ImpUse) {
const LiveInterval &ImpLi = getInterval(ImpUse);
for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
re = mri_->use_end(); ri != re; ++ri) {
MachineInstr *UseMI = &*ri;
unsigned UseIdx = getInstructionIndex(UseMI);
if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
continue;
if (!canFoldMemoryOperand(UseMI, li.reg) &&
!isValNoAvailableAt(ImpLi, MI, UseIdx))
return false;
}
}
return true; return true;
} }
@ -654,7 +704,8 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
return false; return false;
MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx); MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
bool DefIsLoad = false; bool DefIsLoad = false;
if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad)) if (!ReMatDefMI ||
!isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
return false; return false;
isLoad |= DefIsLoad; isLoad |= DefIsLoad;
} }
@ -684,14 +735,16 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
SmallVector<unsigned, 2> FoldOps; SmallVector<unsigned, 2> FoldOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) { for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
unsigned OpIdx = Ops[i]; unsigned OpIdx = Ops[i];
MachineOperand &MO = MI->getOperand(OpIdx);
// FIXME: fold subreg use. // FIXME: fold subreg use.
if (MI->getOperand(OpIdx).getSubReg()) if (MO.getSubReg())
return false; return false;
if (MI->getOperand(OpIdx).isDef()) if (MO.isDef())
MRInfo |= (unsigned)VirtRegMap::isMod; MRInfo |= (unsigned)VirtRegMap::isMod;
else { else {
// Filter out two-address use operand(s). // Filter out two-address use operand(s).
if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) { if (!MO.isImplicit() &&
TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
MRInfo = VirtRegMap::isModRef; MRInfo = VirtRegMap::isModRef;
continue; continue;
} }
@ -740,6 +793,23 @@ bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
return tii_->canFoldMemoryOperand(MI, FoldOps); return tii_->canFoldMemoryOperand(MI, FoldOps);
} }
/// canFoldMemoryOperand - Return true if all of MI's operands referencing
/// Reg could be folded into a memory (load / store) instruction by the
/// target. Sub-register uses are not foldable yet.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI, unsigned Reg) const {
  // Collect the indices of every operand that reads or writes Reg, then ask
  // the target whether that whole set can be folded.
  SmallVector<unsigned, 2> FoldOps;
  for (unsigned OpNo = 0, NumOps = MI->getNumOperands(); OpNo != NumOps;
       ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isRegister() || MO.getReg() != Reg)
      continue;
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return false;
    FoldOps.push_back(OpNo);
  }
  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const { bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
SmallPtrSet<MachineBasicBlock*, 4> MBBs; SmallPtrSet<MachineBasicBlock*, 4> MBBs;
for (LiveInterval::Ranges::const_iterator for (LiveInterval::Ranges::const_iterator
@ -757,19 +827,43 @@ bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
return true; return true;
} }
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
MachineInstr *MI, unsigned NewVReg,
VirtRegMap &vrm) {
// There is an implicit use. That means one of the other operand is
// being remat'ed and the remat'ed instruction has li.reg as an
// use operand. Make sure we rewrite that as well.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isRegister())
continue;
unsigned Reg = MO.getReg();
if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
if (!vrm.isReMaterialized(Reg))
continue;
MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
int OpIdx = ReMatMI->findRegisterUseOperandIdx(li.reg);
if (OpIdx != -1)
ReMatMI->getOperand(OpIdx).setReg(NewVReg);
}
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range. /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals:: bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit, rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
unsigned id, unsigned index, unsigned end, MachineInstr *MI, bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI, MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
unsigned Slot, int LdSlot, unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
VirtRegMap &vrm, MachineRegisterInfo &RegInfo, VirtRegMap &vrm,
const TargetRegisterClass* rc, const TargetRegisterClass* rc,
SmallVector<int, 4> &ReMatIds, SmallVector<int, 4> &ReMatIds,
unsigned &NewVReg, bool &HasDef, bool &HasUse,
const MachineLoopInfo *loopInfo, const MachineLoopInfo *loopInfo,
unsigned &NewVReg, bool &HasDef, bool &HasUse,
std::map<unsigned,unsigned> &MBBVRegsMap, std::map<unsigned,unsigned> &MBBVRegsMap,
std::vector<LiveInterval*> &NewLIs) { std::vector<LiveInterval*> &NewLIs) {
bool CanFold = false; bool CanFold = false;
@ -794,6 +888,14 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
if (MI == ReMatOrigDefMI && CanDelete) { if (MI == ReMatOrigDefMI && CanDelete) {
DOUT << "\t\t\t\tErasing re-materlizable def: "; DOUT << "\t\t\t\tErasing re-materlizable def: ";
DOUT << MI << '\n'; DOUT << MI << '\n';
unsigned ImpUse = getReMatImplicitUse(li, MI);
if (ImpUse) {
// To be deleted MI has a virtual register operand, update the
// spill weight of the register interval.
unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
LiveInterval &ImpLi = getInterval(ImpUse);
ImpLi.weight -= getSpillWeight(false, true, loopDepth);
}
RemoveMachineInstrFromMaps(MI); RemoveMachineInstrFromMaps(MI);
vrm.RemoveMachineInstrFromMaps(MI); vrm.RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent(); MI->eraseFromParent();
@ -862,24 +964,40 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
// Create a new virtual register for the spill interval. // Create a new virtual register for the spill interval.
bool CreatedNewVReg = false; bool CreatedNewVReg = false;
if (NewVReg == 0) { if (NewVReg == 0) {
NewVReg = RegInfo.createVirtualRegister(rc); NewVReg = mri_->createVirtualRegister(rc);
vrm.grow(); vrm.grow();
CreatedNewVReg = true; CreatedNewVReg = true;
} }
mop.setReg(NewVReg); mop.setReg(NewVReg);
if (mop.isImplicit())
rewriteImplicitOps(li, MI, NewVReg, vrm);
// Reuse NewVReg for other reads. // Reuse NewVReg for other reads.
for (unsigned j = 0, e = Ops.size(); j != e; ++j) for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
MI->getOperand(Ops[j]).setReg(NewVReg); MachineOperand &mopj = MI->getOperand(Ops[j]);
mopj.setReg(NewVReg);
if (mopj.isImplicit())
rewriteImplicitOps(li, MI, NewVReg, vrm);
}
if (CreatedNewVReg) { if (CreatedNewVReg) {
if (DefIsReMat) { if (DefIsReMat) {
unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
if (ImpUse) {
// Re-matting an instruction with virtual register use. Add the
// register as an implicit use on the use MI and update the register
// interval's spill weight.
unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
LiveInterval &ImpLi = getInterval(ImpUse);
ImpLi.weight += getSpillWeight(false, true, loopDepth);
MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
}
vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/); vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) { if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
// Each valnum may have its own remat id. // Each valnum may have its own remat id.
ReMatIds[id] = vrm.assignVirtReMatId(NewVReg); ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
} else { } else {
vrm.assignVirtReMatId(NewVReg, ReMatIds[id]); vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
} }
if (!CanDelete || (HasUse && HasDef)) { if (!CanDelete || (HasUse && HasDef)) {
// If this is a two-addr instruction then its use operands are // If this is a two-addr instruction then its use operands are
@ -981,7 +1099,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI, MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
unsigned Slot, int LdSlot, unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
VirtRegMap &vrm, MachineRegisterInfo &RegInfo, VirtRegMap &vrm,
const TargetRegisterClass* rc, const TargetRegisterClass* rc,
SmallVector<int, 4> &ReMatIds, SmallVector<int, 4> &ReMatIds,
const MachineLoopInfo *loopInfo, const MachineLoopInfo *loopInfo,
@ -999,8 +1117,8 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
// First collect all the def / use in this live range that will be rewritten. // First collect all the def / use in this live range that will be rewritten.
// Make sure they are sorted according instruction index. // Make sure they are sorted according instruction index.
std::vector<RewriteInfo> RewriteMIs; std::vector<RewriteInfo> RewriteMIs;
for (MachineRegisterInfo::reg_iterator ri = RegInfo.reg_begin(li.reg), for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
re = RegInfo.reg_end(); ri != re; ) { re = mri_->reg_end(); ri != re; ) {
MachineInstr *MI = &(*ri); MachineInstr *MI = &(*ri);
MachineOperand &O = ri.getOperand(); MachineOperand &O = ri.getOperand();
++ri; ++ri;
@ -1063,11 +1181,11 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
bool HasDef = false; bool HasDef = false;
bool HasUse = false; bool HasUse = false;
bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id, bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
index, end, MI, ReMatOrigDefMI, ReMatDefMI, index, end, MI, ReMatOrigDefMI, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
CanDelete, vrm, RegInfo, rc, ReMatIds, NewVReg, CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs); HasDef, HasUse, MBBVRegsMap, NewLIs);
if (!HasDef && !HasUse) if (!HasDef && !HasUse)
continue; continue;
@ -1211,8 +1329,7 @@ addIntervalsForSpills(const LiveInterval &li,
std::map<unsigned, std::vector<SRInfo> > RestoreIdxes; std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
std::map<unsigned,unsigned> MBBVRegsMap; std::map<unsigned,unsigned> MBBVRegsMap;
std::vector<LiveInterval*> NewLIs; std::vector<LiveInterval*> NewLIs;
MachineRegisterInfo &RegInfo = mf_->getRegInfo(); const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
const TargetRegisterClass* rc = RegInfo.getRegClass(li.reg);
unsigned NumValNums = li.getNumValNums(); unsigned NumValNums = li.getNumValNums();
SmallVector<MachineInstr*, 4> ReMatDefs; SmallVector<MachineInstr*, 4> ReMatDefs;
@ -1257,13 +1374,13 @@ addIntervalsForSpills(const LiveInterval &li,
// Note ReMatOrigDefMI has already been deleted. // Note ReMatOrigDefMI has already been deleted.
rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI, rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
false, vrm, RegInfo, rc, ReMatIds, loopInfo, false, vrm, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
MBBVRegsMap, NewLIs); MBBVRegsMap, NewLIs);
} else { } else {
rewriteInstructionsForSpills(li, false, I, NULL, 0, rewriteInstructionsForSpills(li, false, I, NULL, 0,
Slot, 0, false, false, false, Slot, 0, false, false, false,
false, vrm, RegInfo, rc, ReMatIds, loopInfo, false, vrm, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
MBBVRegsMap, NewLIs); MBBVRegsMap, NewLIs);
} }
@ -1331,7 +1448,7 @@ addIntervalsForSpills(const LiveInterval &li,
(DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad()); (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI, rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
CanDelete, vrm, RegInfo, rc, ReMatIds, loopInfo, CanDelete, vrm, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
MBBVRegsMap, NewLIs); MBBVRegsMap, NewLIs);
} }
@ -1446,6 +1563,16 @@ addIntervalsForSpills(const LiveInterval &li,
if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad()) if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
Ops, isLoadSS, LdSlot, VReg); Ops, isLoadSS, LdSlot, VReg);
unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
if (ImpUse) {
// Re-matting an instruction with virtual register use. Add the
// register as an implicit use on the use MI and update the register
// interval's spill weight.
unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
LiveInterval &ImpLi = getInterval(ImpUse);
ImpLi.weight += getSpillWeight(false, true, loopDepth);
MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
}
} }
} }
// If folding is not possible / failed, then tell the spiller to issue a // If folding is not possible / failed, then tell the spiller to issue a
@ -1471,8 +1598,8 @@ addIntervalsForSpills(const LiveInterval &li,
MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx); MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg); int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
assert(UseIdx != -1); assert(UseIdx != -1);
if (LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO) == if (LastUse->getOperand(UseIdx).isImplicit() ||
-1) { LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
LastUse->getOperand(UseIdx).setIsKill(); LastUse->getOperand(UseIdx).setIsKill();
vrm.addKillPoint(LI->reg, LastUseIdx); vrm.addKillPoint(LI->reg, LastUseIdx);
} }

View File

@ -574,6 +574,32 @@ static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
} }
} }
/// ReMaterialize - Re-materialize definition for Reg targetting DestReg.
///
/// Clones the recorded remat definition of Reg just before MII, then rewrites
/// any virtual-register use operands on the clone to their assigned physical
/// registers (honoring sub-register indices).
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  TRI->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
  MachineInstr *NewMI = prior(MII);
  for (unsigned OpNo = 0, NumOps = NewMI->getNumOperands(); OpNo != NumOps;
       ++OpNo) {
    MachineOperand &MO = NewMI->getOperand(OpNo);
    if (!MO.isRegister())
      continue;
    unsigned VirtReg = MO.getReg();
    if (VirtReg == 0 || TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    // Remat'ed definitions may only have virtual register *uses* left.
    assert(MO.isUse());
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys);
    // Map through the sub-register index if the operand had one.
    if (unsigned SubIdx = MO.getSubReg())
      MO.setReg(TRI->getSubReg(Phys, SubIdx));
    else
      MO.setReg(Phys);
  }
  ++NumReMats;
}
// ReusedOp - For each reused operand, we keep track of a bit of information, in // ReusedOp - For each reused operand, we keep track of a bit of information, in
// case we need to rollback upon processing a new operand. See comments below. // case we need to rollback upon processing a new operand. See comments below.
@ -693,12 +719,11 @@ namespace {
MI, Spills, MaybeDeadStores, MI, Spills, MaybeDeadStores,
Rejected, RegKills, KillOps, VRM); Rejected, RegKills, KillOps, VRM);
MachineBasicBlock::iterator MII = MI;
if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) { if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
TRI->reMaterialize(*MBB, MI, NewPhysReg, ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TRI, VRM);
VRM.getReMaterializedMI(NewOp.VirtReg));
++NumReMats;
} else { } else {
TII->loadRegFromStackSlot(*MBB, MI, NewPhysReg, TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
NewOp.StackSlotOrReMat, AliasRC); NewOp.StackSlotOrReMat, AliasRC);
// Any stores to this stack slot are not dead anymore. // Any stores to this stack slot are not dead anymore.
MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL; MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
@ -710,7 +735,6 @@ namespace {
MI->getOperand(NewOp.Operand).setReg(NewPhysReg); MI->getOperand(NewOp.Operand).setReg(NewPhysReg);
Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg); Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg);
MachineBasicBlock::iterator MII = MI;
--MII; --MII;
UpdateKills(*MII, RegKills, KillOps); UpdateKills(*MII, RegKills, KillOps);
DOUT << '\t' << *MII; DOUT << '\t' << *MII;
@ -973,15 +997,13 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
if (VRM.isRestorePt(&MI)) { if (VRM.isRestorePt(&MI)) {
std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI); std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) { for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
unsigned VirtReg = RestoreRegs[i]; unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order.
if (!VRM.getPreSplitReg(VirtReg)) if (!VRM.getPreSplitReg(VirtReg))
continue; // Split interval spilled again. continue; // Split interval spilled again.
unsigned Phys = VRM.getPhys(VirtReg); unsigned Phys = VRM.getPhys(VirtReg);
RegInfo->setPhysRegUsed(Phys); RegInfo->setPhysRegUsed(Phys);
if (VRM.isReMaterialized(VirtReg)) { if (VRM.isReMaterialized(VirtReg)) {
TRI->reMaterialize(MBB, &MI, Phys, ReMaterialize(MBB, MII, Phys, VirtReg, TRI, VRM);
VRM.getReMaterializedMI(VirtReg));
++NumReMats;
} else { } else {
const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
TII->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg), TII->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg),
@ -1219,8 +1241,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
RegInfo->setPhysRegUsed(PhysReg); RegInfo->setPhysRegUsed(PhysReg);
ReusedOperands.markClobbered(PhysReg); ReusedOperands.markClobbered(PhysReg);
if (DoReMat) { if (DoReMat) {
TRI->reMaterialize(MBB, &MI, PhysReg, VRM.getReMaterializedMI(VirtReg)); ReMaterialize(MBB, MII, PhysReg, VirtReg, TRI, VRM);
++NumReMats;
} else { } else {
const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC); TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);