Mirror of https://github.com/c64scene-ar/llvm-6502.git
Synced 2025-01-12 17:32:19 +00:00
- Mark last use of a split interval as kill instead of letting the spiller
  track it. This allows an important optimization to be re-enabled.
- If all uses / defs of a split interval can be folded, give the interval a
  low spill weight so it would not be picked when spilling is needed (this
  avoids pushing other intervals in the same BB to be spilled).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@44601 91177308-0d34-0410-b5e6-96231b3b80d8

This commit is contained in:
parent d64b5c82b9
commit 018f9b020b
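The second change relies on how the register allocator picks spill candidates: each live interval carries a spill weight, and when spilling is unavoidable the interval with the lowest weight is the preferred victim. Dividing a fully foldable split interval's weight by 10 steers the spill toward it and away from its neighbors. A minimal stand-alone sketch of that effect (plain C++ with made-up interval names and weights, not LLVM's API):

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Toy stand-in for a live interval with a spill weight.
struct Interval { const char *name; float weight; };

int main() {
  std::vector<Interval> live = {{"a", 2.0f}, {"split", 3.0f}, {"b", 2.5f}};
  // The patch divides the weight of a fully foldable split interval by 10,
  // so when a spill is unavoidable it is chosen instead of its neighbors.
  live[1].weight /= 10.0f;
  auto victim = std::min_element(
      live.begin(), live.end(),
      [](const Interval &a, const Interval &b) { return a.weight < b.weight; });
  std::printf("spill %s (weight %.2f)\n", victim->name, victim->weight);
}
```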
include/llvm/CodeGen/LiveIntervalAnalysis.h

@@ -278,6 +278,8 @@ namespace llvm {
                               SmallVector<unsigned, 2> &Ops,
                               bool isSS, int Slot, unsigned Reg);
 
+    /// canFoldMemoryOperand - Returns true if the specified load / store
+    /// folding is possible.
    bool canFoldMemoryOperand(MachineInstr *MI,
                              SmallVector<unsigned, 2> &Ops) const;
 
lib/CodeGen/LiveIntervalAnalysis.cpp

@@ -691,6 +691,22 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
   return false;
 }
 
+/// canFoldMemoryOperand - Returns true if the specified load / store
+/// folding is possible.
+bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
+                                         SmallVector<unsigned, 2> &Ops) const {
+  SmallVector<unsigned, 2> FoldOps;
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+    unsigned OpIdx = Ops[i];
+    // FIXME: fold subreg use.
+    if (MI->getOperand(OpIdx).getSubReg())
+      return false;
+    FoldOps.push_back(OpIdx);
+  }
+
+  return mri_->canFoldMemoryOperand(MI, FoldOps);
+}
+
 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
   SmallPtrSet<MachineBasicBlock*, 4> MBBs;
   for (LiveInterval::Ranges::const_iterator
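The new helper is a query-only counterpart to tryFoldMemoryOperand: it screens out sub-register operands (the FIXME) and then asks the target, via mri_, whether the remaining operands could be folded, without rewriting anything. A stand-alone sketch of the same screen-then-delegate shape, with hypothetical Operand/Instr types and a stand-in target hook (not LLVM's classes):

```cpp
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for MachineOperand / MachineInstr.
struct Operand { bool isSubReg; };
struct Instr { std::vector<Operand> ops; };

// Pretend target hook; here, any single operand is foldable.
static bool targetCanFold(const Instr &, const std::vector<unsigned> &idxs) {
  return idxs.size() == 1;
}

// Same shape as LiveIntervals::canFoldMemoryOperand: screen out
// sub-register operands, then let the target judge the survivors.
static bool canFoldMemoryOperand(const Instr &mi,
                                 const std::vector<unsigned> &ops) {
  std::vector<unsigned> foldOps;
  for (unsigned idx : ops) {
    if (mi.ops[idx].isSubReg) // the patch's FIXME: subreg folds unsupported
      return false;
    foldOps.push_back(idx);
  }
  return targetCanFold(mi, foldOps);
}

int main() {
  Instr mi{{{false}, {true}}};
  std::printf("fold op 0: %d\n", canFoldMemoryOperand(mi, {0})); // 1
  std::printf("fold op 1: %d\n", canFoldMemoryOperand(mi, {1})); // 0
}
```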
@@ -710,7 +726,7 @@ bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
 
 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
-void LiveIntervals::
+bool LiveIntervals::
 rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
                  unsigned id, unsigned index, unsigned end, MachineInstr *MI,
                  MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
@@ -723,6 +739,7 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
                  const LoopInfo *loopInfo,
                  std::map<unsigned,unsigned> &MBBVRegsMap,
                  std::vector<LiveInterval*> &NewLIs) {
+  bool CanFold = false;
 RestartInstruction:
   for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
     MachineOperand& mop = MI->getOperand(i);
@@ -760,11 +777,6 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
       }
     }
 
-    // Do not fold load / store here if we are splitting. We'll find an
-    // optimal point to insert a load / store later.
-    if (TryFold)
-      TryFold = !TrySplit && NewVReg == 0;
-
     // Scan all of the operands of this instruction rewriting operands
     // to use NewVReg instead of li.reg as appropriate. We do this for
     // two reasons:
@@ -795,15 +807,23 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
       }
     }
 
-    if (TryFold &&
-        tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
-                             Ops, FoldSS, FoldSlot, Reg)) {
-      // Folding the load/store can completely change the instruction in
-      // unpredictable ways, rescan it from the beginning.
-      HasUse = false;
-      HasDef = false;
-      goto RestartInstruction;
-    }
+    if (TryFold) {
+      // Do not fold load / store here if we are splitting. We'll find an
+      // optimal point to insert a load / store later.
+      if (!TrySplit) {
+        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+                                 Ops, FoldSS, FoldSlot, Reg)) {
+          // Folding the load/store can completely change the instruction in
+          // unpredictable ways, rescan it from the beginning.
+          HasUse = false;
+          HasDef = false;
+          CanFold = false;
+          goto RestartInstruction;
+        }
+      } else {
+        CanFold = canFoldMemoryOperand(MI, Ops);
+      }
+    } else CanFold = false;
 
     // Create a new virtual register for the spill interval.
     bool CreatedNewVReg = false;
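The restructuring changes when folding happens rather than whether: outside of splitting, a successful tryFoldMemoryOperand rewrites the instruction, so the operand scan restarts and CanFold is cleared; during splitting, the fold is deferred to a better insertion point and only the feasibility check (canFoldMemoryOperand) runs, feeding the AllCanFold bookkeeping in the caller. A compact stand-alone model of the resulting decision, with stand-in helpers rather than LLVM's API:

```cpp
#include <cstdio>

static bool tryFold()  { return true; } // stand-in: mutate the instruction now
static bool testFold() { return true; } // stand-in: only probe feasibility

// Mirrors the new control flow around tryFoldMemoryOperand /
// canFoldMemoryOperand; returns what CanFold ends up holding.
static bool canFoldAfterScan(bool tryFoldFlag, bool trySplit) {
  bool canFold = false;
  if (tryFoldFlag) {
    if (!trySplit) {
      if (tryFold()) {
        // The real code also resets HasUse/HasDef and restarts the operand
        // scan here, since folding may rewrite the instruction wholesale.
        canFold = false;
      }
    } else {
      canFold = testFold(); // defer the fold, remember it was possible
    }
  } else {
    canFold = false;
  }
  return canFold;
}

int main() {
  std::printf("splitting:     CanFold=%d\n", canFoldAfterScan(true, true));
  std::printf("not splitting: CanFold=%d\n", canFoldAfterScan(true, false));
}
```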
@@ -879,8 +899,9 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
       nI.print(DOUT, mri_);
       DOUT << '\n';
     }
+  return CanFold;
 }
 
 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                          const VNInfo *VNI,
                                          MachineBasicBlock *MBB, unsigned Idx) const {
@@ -920,6 +940,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                     std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                     std::map<unsigned,unsigned> &MBBVRegsMap,
                     std::vector<LiveInterval*> &NewLIs) {
+  bool AllCanFold = true;
   unsigned NewVReg = 0;
   unsigned index = getBaseIndex(I->start);
   unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
@@ -931,12 +952,12 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
 
     MachineInstr *MI = getInstructionFromIndex(index);
     MachineBasicBlock *MBB = MI->getParent();
-    NewVReg = 0;
+    unsigned ThisVReg = 0;
     if (TrySplit) {
       std::map<unsigned,unsigned>::const_iterator NVI =
         MBBVRegsMap.find(MBB->getNumber());
       if (NVI != MBBVRegsMap.end()) {
-        NewVReg = NVI->second;
+        ThisVReg = NVI->second;
         // One common case:
         // x = use
         // ...
@@ -959,21 +980,35 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
         }
         if (MIHasDef && !MIHasUse) {
           MBBVRegsMap.erase(MBB->getNumber());
-          NewVReg = 0;
+          ThisVReg = 0;
         }
       }
     }
-    bool IsNew = NewVReg == 0;
+
+    bool IsNew = ThisVReg == 0;
+    if (IsNew) {
+      // This ends the previous live interval. If all of its def / use
+      // can be folded, give it a low spill weight.
+      if (NewVReg && TrySplit && AllCanFold) {
+        LiveInterval &nI = getOrCreateInterval(NewVReg);
+        nI.weight /= 10.0F;
+      }
+      AllCanFold = true;
+    }
+    NewVReg = ThisVReg;
+
     bool HasDef = false;
     bool HasUse = false;
-    rewriteInstructionForSpills(li, TrySplit, I->valno->id, index, end,
-                                MI, ReMatOrigDefMI, ReMatDefMI, Slot, LdSlot,
-                                isLoad, isLoadSS, DefIsReMat, CanDelete, vrm,
-                                RegMap, rc, ReMatIds, NewVReg, HasDef, HasUse,
-                                loopInfo, MBBVRegsMap, NewLIs);
+    bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
+                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
+                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
+                                CanDelete, vrm, RegMap, rc, ReMatIds, NewVReg,
+                                HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
     if (!HasDef && !HasUse)
       continue;
 
+    AllCanFold &= CanFold;
+
     // Update weight of spill interval.
     LiveInterval &nI = getOrCreateInterval(NewVReg);
     if (!TrySplit) {
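Tracking ThisVReg separately from NewVReg lets the loop detect interval boundaries: when an instruction starts a new virtual register, the previous split interval is finished, and if every rewritten use and def of it reported CanFold, its spill weight is cut before AllCanFold resets for the next interval. The `AllCanFold &= CanFold` accumulation means one unfoldable access disqualifies the whole interval. A stand-alone sketch of that bookkeeping (made-up per-instruction results, not LLVM code):

```cpp
#include <cstdio>
#include <vector>

int main() {
  // Foldability of each rewritten use/def in one split interval, as
  // rewriteInstructionForSpills now reports it per instruction.
  std::vector<bool> canFoldPerInstr = {true, true, false, true};

  bool allCanFold = true;
  for (bool canFold : canFoldPerInstr)
    allCanFold &= canFold; // one unfoldable access poisons the interval

  float weight = 4.0f;
  if (allCanFold)
    weight /= 10.0f; // the patch's nI.weight /= 10.0F heuristic
  std::printf("AllCanFold=%d, final weight=%.2f\n", allCanFold, weight);
}
```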
@@ -1058,6 +1093,12 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
       unsigned loopDepth = loopInfo->getLoopDepth(MBB->getBasicBlock());
       nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
     }
+
+    if (NewVReg && TrySplit && AllCanFold) {
+      // If all of its def / use can be folded, give it a low spill weight.
+      LiveInterval &nI = getOrCreateInterval(NewVReg);
+      nI.weight /= 10.0F;
+    }
   }
 
 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
@@ -1331,8 +1372,14 @@ addIntervalsForSpills(const LiveInterval &li,
         // load / rematerialization for us.
         if (Folded)
           nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
-        else
+        else {
           vrm.addRestorePoint(VReg, MI);
+          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
+          MachineInstr *LastUse = getInstructionFromIndex(getBaseIndex(LR->end));
+          int UseIdx = LastUse->findRegisterUseOperandIdx(VReg);
+          assert(UseIdx != -1);
+          LastUse->getOperand(UseIdx).setIsKill();
+        }
       }
       Id = RestoreMBBs.find_next(Id);
     }
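When a restore cannot be folded away, the patch now locates the instruction at the end of the interval's last live range and sets the kill flag on its use of the register, so downstream code knows exactly where the value dies instead of the spiller having to track it (the first bullet of the commit message). A stand-alone sketch of the marking step with hypothetical Operand/Instr types; the real code uses findRegisterUseOperandIdx and setIsKill as shown in the hunk above:

```cpp
#include <cassert>
#include <vector>

// Hypothetical stand-ins for MachineOperand / MachineInstr.
struct Operand { unsigned reg; bool isUse; bool isKill = false; };
struct Instr { std::vector<Operand> operands; };

// Flag the use of `reg` on the interval's last instruction as the point
// where the value dies, as the patch does with setIsKill().
static void markLastUseKill(Instr &lastUse, unsigned reg) {
  for (Operand &op : lastUse.operands)
    if (op.isUse && op.reg == reg) {
      op.isKill = true;
      return;
    }
  assert(false && "last instruction must use the register"); // UseIdx != -1
}

int main() {
  Instr last{{{5, true}, {7, false}}};
  markLastUseKill(last, 5);
  assert(last.operands[0].isKill);
}
```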
lib/CodeGen/VirtRegMap.cpp

@@ -1295,8 +1295,6 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
       // the value and there isn't an earlier def that has already clobbered the
       // physreg.
       if (PhysReg &&
-          Spills.canClobberPhysReg(SS) &&
-          !ReusedOperands.isClobbered(PhysReg) &&
          DeadStore->findRegisterUseOperandIdx(PhysReg, true) != -1 &&
          MRI->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
        MBB.insert(MII, NewMIs[0]);
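This last hunk drops two guards from the dead-store unfolding test in LocalSpiller::RewriteMBB, leaving the kill-flagged-use check (the `true` argument to findRegisterUseOperandIdx) and the target's unfoldMemoryOperand query to gate the transformation. This appears to be the "important optimization ... re-enabled" from the commit message: with the splitting code now setting kill flags itself, the extra clobber checks are presumably redundant, though the commit message does not say so explicitly.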