Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-14 11:32:34 +00:00
Use a transposed algorithm for handleMove().
Completely update one interval at a time instead of collecting live range
fragments to be updated. This avoids building data structures, except for a
single SmallPtrSet of updated intervals.

Also share code between handleMove() and handleMoveIntoBundle().

Add support for moving dead defs across other live values in the interval.
The MI scheduler can do that.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165824 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Parent: 780c798222
Commit: ad5e969ba9
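The gist of the transposed algorithm, as a minimal standalone C++ sketch (illustrative only, not LLVM code: the Interval/Instr types, the integer slot indexes, and the single kill-extension case are all simplifying assumptions). Where the old code collected Entering/Internal/Exiting fragments per operand and then fixed up each set, the new code visits each affected interval once and updates it completely, with a plain std::set standing in for the SmallPtrSet of updated intervals:

#include <cstdio>
#include <set>
#include <utility>
#include <vector>

// Toy model: a live interval is a list of [start, end) segments for one
// register, and an "instruction" names the registers it touches.
struct Interval {
  int Reg;
  std::vector<std::pair<int, int>> Segments;
};

struct Instr {
  int Index;             // hypothetical slot index
  std::vector<int> Regs; // registers read or written
};

// Transposed shape: update each touched interval exactly once; the only
// data structure built is the set of already-updated intervals.
void updateAllRanges(const Instr &MI, int NewIdx,
                     std::vector<Interval> &Intervals) {
  std::set<Interval *> Updated;
  for (int Reg : MI.Regs) {
    Interval *LI = &Intervals[Reg];
    if (!Updated.insert(LI).second)
      continue; // already updated, e.g. a tied use+def of the same register
    for (auto &Seg : LI->Segments)
      if (Seg.second == MI.Index) // simplified "kill at OldIdx" case
        Seg.second = NewIdx;      // extend to the instruction's new slot
  }
}

int main() {
  std::vector<Interval> Intervals = {{0, {{0, 4}}}, {1, {{2, 4}}}};
  Instr MI{4, {0, 1, 0}}; // register 0 appears twice (tied operands)
  updateAllRanges(MI, 8, Intervals);
  for (const Interval &LI : Intervals)
    for (auto &S : LI.Segments)
      std::printf("reg%d: [%d,%d)\n", LI.Reg, S.first, S.second);
}

The visited-set guard is what keeps tied operands and aliasing physreg units cheap: an interval is touched once no matter how many operands reference it.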
@@ -1007,246 +1007,240 @@ private:
   LiveIntervals& LIS;
   const MachineRegisterInfo& MRI;
   const TargetRegisterInfo& TRI;
+  SlotIndex OldIdx;
   SlotIndex NewIdx;
 
-  typedef std::pair<LiveInterval*, LiveRange*> IntRangePair;
-  typedef DenseSet<IntRangePair> RangeSet;
-
-  struct RegRanges {
-    LiveRange* Use;
-    LiveRange* EC;
-    LiveRange* Dead;
-    LiveRange* Def;
-    RegRanges() : Use(0), EC(0), Dead(0), Def(0) {}
-  };
-  typedef DenseMap<unsigned, RegRanges> BundleRanges;
+  SmallPtrSet<LiveInterval*, 8> Updated;
 
 public:
   HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
-           const TargetRegisterInfo& TRI, SlotIndex NewIdx)
-    : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {}
-
-  // Update intervals for all operands of MI from OldIdx to NewIdx.
-  // This assumes that MI used to be at OldIdx, and now resides at
-  // NewIdx.
-  void moveAllRangesFrom(MachineInstr* MI, SlotIndex OldIdx) {
-    assert(NewIdx != OldIdx && "No-op move? That's a bit strange.");
-
-    // Collect the operands.
-    RangeSet Entering, Internal, Exiting;
-    bool hasRegMaskOp = false;
-    collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
-
-    // To keep the LiveRanges valid within an interval, move the ranges closest
-    // to the destination first. This prevents ranges from overlapping, so that
-    // APIs like removeRange still work.
-    if (NewIdx < OldIdx) {
-      moveAllEnteringFrom(OldIdx, Entering);
-      moveAllInternalFrom(OldIdx, Internal);
-      moveAllExitingFrom(OldIdx, Exiting);
-    }
-    else {
-      moveAllExitingFrom(OldIdx, Exiting);
-      moveAllInternalFrom(OldIdx, Internal);
-      moveAllEnteringFrom(OldIdx, Entering);
-    }
-
-    if (hasRegMaskOp)
-      updateRegMaskSlots(OldIdx);
-
-#ifndef NDEBUG
-    LIValidator validator;
-    validator = std::for_each(Entering.begin(), Entering.end(), validator);
-    validator = std::for_each(Internal.begin(), Internal.end(), validator);
-    validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
-    assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness.");
-#endif
-  }
-
-  // Update intervals for all operands of MI to refer to BundleStart's
-  // SlotIndex.
-  void moveAllRangesInto(MachineInstr* MI, MachineInstr* BundleStart) {
-    if (MI == BundleStart)
-      return; // Bundling instr with itself - nothing to do.
-
-    SlotIndex OldIdx = LIS.getSlotIndexes()->getInstructionIndex(MI);
-    assert(LIS.getSlotIndexes()->getInstructionFromIndex(OldIdx) == MI &&
-           "SlotIndex <-> Instruction mapping broken for MI");
-
-    // Collect all ranges already in the bundle.
-    MachineBasicBlock::instr_iterator BII(BundleStart);
-    RangeSet Entering, Internal, Exiting;
-    bool hasRegMaskOp = false;
-    collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
-    assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
-    for (++BII; &*BII == MI || BII->isInsideBundle(); ++BII) {
-      if (&*BII == MI)
-        continue;
-      collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
-      assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
-    }
-
-    BundleRanges BR = createBundleRanges(Entering, Internal, Exiting);
-
-    Entering.clear();
-    Internal.clear();
-    Exiting.clear();
-    collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
-    assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
-
-    DEBUG(dbgs() << "Entering: " << Entering.size() << "\n");
-    DEBUG(dbgs() << "Internal: " << Internal.size() << "\n");
-    DEBUG(dbgs() << "Exiting: " << Exiting.size() << "\n");
-
-    moveAllEnteringFromInto(OldIdx, Entering, BR);
-    moveAllInternalFromInto(OldIdx, Internal, BR);
-    moveAllExitingFromInto(OldIdx, Exiting, BR);
-
-#ifndef NDEBUG
-    LIValidator validator;
-    validator = std::for_each(Entering.begin(), Entering.end(), validator);
-    validator = std::for_each(Internal.begin(), Internal.end(), validator);
-    validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
-    assert(validator.rangesOk() && "moveAllOperandsInto broke liveness.");
-#endif
-  }
+           const TargetRegisterInfo& TRI,
+           SlotIndex OldIdx, SlotIndex NewIdx)
+    : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx) {}
+
+  /// Update all live ranges touched by MI, assuming a move from OldIdx to
+  /// NewIdx.
+  void updateAllRanges(MachineInstr *MI) {
+    DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI);
+    bool hasRegMask = false;
+    for (MIOperands MO(MI); MO.isValid(); ++MO) {
+      if (MO->isRegMask())
+        hasRegMask = true;
+      if (!MO->isReg())
+        continue;
+      // Aggressively clear all kill flags.
+      // They are reinserted by VirtRegRewriter.
+      if (MO->isUse())
+        MO->setIsKill(false);
+
+      unsigned Reg = MO->getReg();
+      if (!Reg)
+        continue;
+      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+        updateRange(LIS.getInterval(Reg));
+        continue;
+      }
+
+      // For physregs, only update the regunits that actually have a
+      // precomputed live range.
+      for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
+        if (LiveInterval *LI = LIS.getCachedRegUnit(*Units))
+          updateRange(*LI);
+    }
+    if (hasRegMask)
+      updateRegMaskSlots();
+  }
 
 private:
 
-#ifndef NDEBUG
-  class LIValidator {
-  private:
-    DenseSet<const LiveInterval*> Checked, Bogus;
-  public:
-    void operator()(const IntRangePair& P) {
-      const LiveInterval* LI = P.first;
-      if (Checked.count(LI))
-        return;
-      Checked.insert(LI);
-      if (LI->empty())
-        return;
-      SlotIndex LastEnd = LI->begin()->start;
-      for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end();
-           LRI != LRE; ++LRI) {
-        const LiveRange& LR = *LRI;
-        if (LastEnd > LR.start || LR.start >= LR.end)
-          Bogus.insert(LI);
-        LastEnd = LR.end;
-      }
-    }
-
-    bool rangesOk() const {
-      return Bogus.empty();
-    }
-  };
-#endif
-
-  // Collect IntRangePairs for all operands of MI that may need fixing.
-  // Treats MI's index as OldIdx (regardless of what it is in SlotIndexes'
-  // maps).
-  void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal,
-                     RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) {
-    hasRegMaskOp = false;
-    for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
-                                    MOE = MI->operands_end();
-         MOI != MOE; ++MOI) {
-      const MachineOperand& MO = *MOI;
-
-      if (MO.isRegMask()) {
-        hasRegMaskOp = true;
-        continue;
-      }
-
-      if (!MO.isReg() || MO.getReg() == 0)
-        continue;
-
-      unsigned Reg = MO.getReg();
-
-      // Don't track uses of reserved registers - they're not accurate.
-      // Reserved register live ranges look like a set of dead defs.
-      bool Resv =
-        TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg);
-
-      // Collect ranges for register units. These live ranges are computed on
-      // demand, so just skip any that haven't been computed yet.
-      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
-        for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
-          if (LiveInterval *LI = LIS.getCachedRegUnit(*Units))
-            collectRanges(MO, LI, Entering, Internal, Exiting, OldIdx, Resv);
-      } else {
-        // Collect ranges for individual virtual registers.
-        collectRanges(MO, &LIS.getInterval(Reg),
-                      Entering, Internal, Exiting, OldIdx);
-      }
-    }
-  }
-
-  void collectRanges(const MachineOperand &MO, LiveInterval *LI,
-                     RangeSet &Entering, RangeSet &Internal, RangeSet &Exiting,
-                     SlotIndex OldIdx, bool IgnoreReads = false) {
-    if (!IgnoreReads && MO.readsReg()) {
-      LiveRange* LR = LI->getLiveRangeContaining(OldIdx);
-      if (LR != 0)
-        Entering.insert(std::make_pair(LI, LR));
-    }
-    if (MO.isDef()) {
-      LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot());
-      assert(LR != 0 && "No live range for def?");
-      if (LR->end > OldIdx.getDeadSlot())
-        Exiting.insert(std::make_pair(LI, LR));
-      else
-        Internal.insert(std::make_pair(LI, LR));
-    }
-  }
+  /// Update a single live range, assuming an instruction has been moved from
+  /// OldIdx to NewIdx.
+  void updateRange(LiveInterval &LI) {
+    if (!Updated.insert(&LI))
+      return;
+    DEBUG({
+      dbgs() << "  ";
+      if (TargetRegisterInfo::isVirtualRegister(LI.reg))
+        dbgs() << PrintReg(LI.reg);
+      else
+        dbgs() << PrintRegUnit(LI.reg, &TRI);
+      dbgs() << ":\t" << LI << '\n';
+    });
+    if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
+      handleMoveDown(LI);
+    else
+      handleMoveUp(LI);
+    DEBUG(dbgs() << " -->\t" << LI << '\n');
+    LI.verify();
+  }
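updateRange() picks a direction by comparing OldIdx and NewIdx, and the two handlers below lean heavily on SlotIndex's per-instruction sub-slots through getRegSlot(), getDeadSlot(), isSameInstr() and isEarlierInstr(). A small toy model of that ordering (a sketch under the assumption of four ordered sub-slots per instruction, as in llvm/CodeGen/SlotIndexes.h; the integer encoding here is invented):

#include <cassert>

// Toy SlotIndex: each instruction number carries four sub-slots,
// ordered Block < EarlyClobber < Register < Dead.
enum Slot { Block, EarlyClobber, Register, Dead };

struct SlotIdx {
  int Instr;
  Slot S;
  SlotIdx getRegSlot(bool EC = false) const {
    return SlotIdx{Instr, EC ? EarlyClobber : Register};
  }
  SlotIdx getDeadSlot() const { return SlotIdx{Instr, Dead}; }
};

static bool isSameInstr(SlotIdx A, SlotIdx B) { return A.Instr == B.Instr; }
static bool isEarlierInstr(SlotIdx A, SlotIdx B) { return A.Instr < B.Instr; }

int main() {
  // The sub-slot order is what lets a dead def [x r, x d) sit inside one
  // instruction while a normal def starts at the register slot.
  assert(Block < EarlyClobber && EarlyClobber < Register && Register < Dead);

  SlotIdx OldIdx{36, Register}, NewIdx{48, Register};
  assert(isEarlierInstr(OldIdx, NewIdx)); // would dispatch to handleMoveDown()
  assert(isSameInstr(NewIdx.getRegSlot(), NewIdx.getDeadSlot()));
  (void)OldIdx; (void)NewIdx;
}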
 
-  BundleRanges createBundleRanges(RangeSet& Entering,
-                                  RangeSet& Internal,
-                                  RangeSet& Exiting) {
-    BundleRanges BR;
-
-    for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
-         EI != EE; ++EI) {
-      LiveInterval* LI = EI->first;
-      LiveRange* LR = EI->second;
-      BR[LI->reg].Use = LR;
-    }
-
-    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
-         II != IE; ++II) {
-      LiveInterval* LI = II->first;
-      LiveRange* LR = II->second;
-      if (LR->end.isDead()) {
-        BR[LI->reg].Dead = LR;
-      } else {
-        BR[LI->reg].EC = LR;
-      }
-    }
-
-    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
-         EI != EE; ++EI) {
-      LiveInterval* LI = EI->first;
-      LiveRange* LR = EI->second;
-      BR[LI->reg].Def = LR;
-    }
-
-    return BR;
-  }
+  /// Update LI to reflect an instruction has been moved downwards from OldIdx
+  /// to NewIdx.
+  ///
+  /// 1. Live def at OldIdx:
+  ///    Move def to NewIdx, assert endpoint after NewIdx.
+  ///
+  /// 2. Live def at OldIdx, killed at NewIdx:
+  ///    Change to dead def at NewIdx.
+  ///    (Happens when bundling def+kill together).
+  ///
+  /// 3. Dead def at OldIdx:
+  ///    Move def to NewIdx, possibly across another live value.
+  ///
+  /// 4. Def at OldIdx AND at NewIdx:
+  ///    Remove live range [OldIdx;NewIdx) and value defined at OldIdx.
+  ///    (Happens when bundling multiple defs together).
+  ///
+  /// 5. Value read at OldIdx, killed before NewIdx:
+  ///    Extend kill to NewIdx.
+  ///
+  void handleMoveDown(LiveInterval &LI) {
+    // First look for a kill at OldIdx.
+    LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex());
+    LiveInterval::iterator E = LI.end();
+    // Is LI even live at OldIdx?
+    if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
+      return;
+
+    // Handle a live-in value.
+    if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
+      bool isKill = SlotIndex::isSameInstr(OldIdx, I->end);
+      // If the live-in value already extends to NewIdx, there is nothing to do.
+      if (!SlotIndex::isEarlierInstr(I->end, NewIdx))
+        return;
+      // Aggressively remove all kill flags from the old kill point.
+      // Kill flags shouldn't be used while live intervals exist, they will be
+      // reinserted by VirtRegRewriter.
+      if (MachineInstr *KillMI = LIS.getInstructionFromIndex(I->end))
+        for (MIBundleOperands MO(KillMI); MO.isValid(); ++MO)
+          if (MO->isReg() && MO->isUse())
+            MO->setIsKill(false);
+      // Adjust I->end to reach NewIdx. This may temporarily make LI invalid by
+      // overlapping ranges. Case 5 above.
+      I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
+      // If this was a kill, there may also be a def. Otherwise we're done.
+      if (!isKill)
+        return;
+      ++I;
+    }
+
+    // Check for a def at OldIdx.
+    if (I == E || !SlotIndex::isSameInstr(OldIdx, I->start))
+      return;
+    // We have a def at OldIdx.
+    VNInfo *DefVNI = I->valno;
+    assert(DefVNI->def == I->start && "Inconsistent def");
+    DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
+    // If the defined value extends beyond NewIdx, just move the def down.
+    // This is case 1 above.
+    if (SlotIndex::isEarlierInstr(NewIdx, I->end)) {
+      I->start = DefVNI->def;
+      return;
+    }
+    // The remaining possibilities are now:
+    // 2. Live def at OldIdx, killed at NewIdx: isSameInstr(I->end, NewIdx).
+    // 3. Dead def at OldIdx: I->end = OldIdx.getDeadSlot().
+    // In either case, it is possible that there is an existing def at NewIdx.
+    assert((I->end == OldIdx.getDeadSlot() ||
+            SlotIndex::isSameInstr(I->end, NewIdx)) &&
+            "Cannot move def below kill");
+    LiveInterval::iterator NewI = LI.advanceTo(I, NewIdx.getRegSlot());
+    if (NewI != E && SlotIndex::isSameInstr(NewI->start, NewIdx)) {
+      // There is an existing def at NewIdx, case 4 above. The def at OldIdx is
+      // coalesced into that value.
+      assert(NewI->valno != DefVNI && "Multiple defs of value?");
+      LI.removeValNo(DefVNI);
+      return;
+    }
+    // There was no existing def at NewIdx. Turn *I into a dead def at NewIdx.
+    // If the def at OldIdx was dead, we allow it to be moved across other LI
+    // values. The new range should be placed immediately before NewI, move any
+    // intermediate ranges up.
+    assert(NewI != I && "Inconsistent iterators");
+    std::copy(llvm::next(I), NewI, I);
+    *llvm::prior(NewI) = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
+  }
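The case analysis above boils down to endpoint arithmetic on sorted segments. A standalone toy sketch of cases 1, 3 and 5 with plain integers (a simplification by assumption: one segment, no VNInfo bookkeeping, no early-clobber slots; the numbers echo the f1 test added below):

#include <cassert>

// One segment is [start, end); end marks the kill.
struct Seg { int start, end; };

// Simplified handleMoveDown: the instruction moved down from Old to New.
void handleMoveDownToy(Seg &S, int Old, int New) {
  assert(Old < New && "moving down means Old < New");
  if (S.start < Old && S.end == Old) {
    // Case 5: value read at Old and killed there; extend the kill to New.
    S.end = New;
  } else if (S.start == Old && S.end > New) {
    // Case 1: live def at Old extending past New; just move the def down.
    S.start = New;
  } else if (S.start == Old && S.end == Old) {
    // Case 3: dead def at Old becomes a dead def at New.
    S.start = S.end = New;
  }
}

int main() {
  Seg Use{16, 144};                 // killed at 144
  handleMoveDownToy(Use, 144, 180); // sink the killing instruction
  assert(Use.end == 180);           // kill extended, as in the f1 test below

  Seg Def{144, 300};                // live def at 144
  handleMoveDownToy(Def, 144, 180);
  assert(Def.start == 180);         // def moved down, value still live
}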
 
+  /// Update LI to reflect an instruction has been moved upwards from OldIdx
+  /// to NewIdx.
+  ///
+  /// 1. Live def at OldIdx:
+  ///    Hoist def to NewIdx.
+  ///
+  /// 2. Dead def at OldIdx:
+  ///    Hoist def+end to NewIdx, possibly move across other values.
+  ///
+  /// 3. Dead def at OldIdx AND existing def at NewIdx:
+  ///    Remove value defined at OldIdx, coalescing it with existing value.
+  ///
+  /// 4. Live def at OldIdx AND existing def at NewIdx:
+  ///    Remove value defined at NewIdx, hoist OldIdx def to NewIdx.
+  ///    (Happens when bundling multiple defs together).
+  ///
+  /// 5. Value killed at OldIdx:
+  ///    Hoist kill to NewIdx, then scan for last kill between NewIdx and
+  ///    OldIdx.
+  ///
+  void handleMoveUp(LiveInterval &LI) {
+    // First look for a kill at OldIdx.
+    LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex());
+    LiveInterval::iterator E = LI.end();
+    // Is LI even live at OldIdx?
+    if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
+      return;
+
+    // Handle a live-in value.
+    if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
+      // If the live-in value isn't killed here, there is nothing to do.
+      if (!SlotIndex::isSameInstr(OldIdx, I->end))
+        return;
+      // Adjust I->end to end at NewIdx. If we are hoisting a kill above
+      // another use, we need to search for that use. Case 5 above.
+      I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
+      ++I;
+      // If OldIdx also defines a value, there couldn't have been another use.
+      if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) {
+        // No def, search for the new kill.
+        // This can never be an early clobber kill since there is no def.
+        llvm::prior(I)->end = findLastUseBefore(LI.reg).getRegSlot();
+        return;
+      }
+    }
+
+    // Now deal with the def at OldIdx.
+    assert(I != E && SlotIndex::isSameInstr(I->start, OldIdx) && "No def?");
+    VNInfo *DefVNI = I->valno;
+    assert(DefVNI->def == I->start && "Inconsistent def");
+    DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
+
+    // Check for an existing def at NewIdx.
+    LiveInterval::iterator NewI = LI.find(NewIdx.getRegSlot());
+    if (SlotIndex::isSameInstr(NewI->start, NewIdx)) {
+      assert(NewI->valno != DefVNI && "Same value defined more than once?");
+      // There is an existing def at NewIdx.
+      if (I->end.isDead()) {
+        // Case 3: Remove the dead def at OldIdx.
+        LI.removeValNo(DefVNI);
+        return;
+      }
+      // Case 4: Replace def at NewIdx with live def at OldIdx.
+      I->start = DefVNI->def;
+      LI.removeValNo(NewI->valno);
+      return;
+    }
+
+    // There is no existing def at NewIdx. Hoist DefVNI.
+    if (!I->end.isDead()) {
+      // Leave the end point of a live def.
+      I->start = DefVNI->def;
+      return;
+    }
+
+    // DefVNI is a dead def. It may have been moved across other values in LI,
+    // so move I up to NewI. Slide [NewI;I) down one position.
+    std::copy_backward(NewI, I, llvm::next(I));
+    *NewI = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
+  }
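The dead-def slide at the end of handleMoveUp() is the new capability called out in the commit message: a dead def may hop across other live values while the segment vector stays sorted. A minimal standalone demonstration of the std::copy_backward trick (the start points are invented, loosely mirroring the EFLAGS example in the f4 test below):

#include <algorithm>
#include <cassert>
#include <iterator>
#include <vector>

int main() {
  // Segment start points of an interval, kept sorted; 208 is a dead def
  // being hoisted to 36.
  std::vector<int> Starts = {20, 160, 208, 224};
  auto NewI = std::lower_bound(Starts.begin(), Starts.end(), 36);
  auto I = std::find(Starts.begin(), Starts.end(), 208);
  // Slide [NewI, I) down one position so slot *NewI is free to rewrite,
  // exactly like std::copy_backward(NewI, I, llvm::next(I)) above.
  std::copy_backward(NewI, I, std::next(I));
  *NewI = 36; // place the hoisted def
  assert((Starts == std::vector<int>{20, 36, 160, 224}));
}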
 
-  void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) {
-    MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx);
-    if (!OldKillMI->killsRegister(reg))
-      return; // Bail out if we don't have kill flags on the old register.
-    MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx);
-    assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
-    assert(!NewKillMI->killsRegister(reg) &&
-           "New kill instr is already a kill.");
-    OldKillMI->clearRegisterKills(reg, &TRI);
-    NewKillMI->addRegisterKilled(reg, &TRI);
-  }
-
-  void updateRegMaskSlots(SlotIndex OldIdx) {
+  void updateRegMaskSlots() {
     SmallVectorImpl<SlotIndex>::iterator RI =
       std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
                        OldIdx);
@@ -1257,7 +1251,7 @@ private:
   }
 
   // Return the last use of reg between NewIdx and OldIdx.
-  SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) {
+  SlotIndex findLastUseBefore(unsigned Reg) {
     SlotIndex LastUse = NewIdx;
 
     if (TargetRegisterInfo::isVirtualRegister(Reg)) {
@@ -1291,233 +1285,25 @@ private:
    }
    return LastUse;
  }
 
-  void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) {
-    LiveInterval* LI = P.first;
-    LiveRange* LR = P.second;
-    bool LiveThrough = LR->end > OldIdx.getRegSlot();
-    if (LiveThrough)
-      return;
-    SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
-    if (LastUse != NewIdx)
-      moveKillFlags(LI->reg, NewIdx, LastUse);
-    LR->end = LastUse.getRegSlot(LR->end.isEarlyClobber());
-  }
-
-  void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) {
-    LiveInterval* LI = P.first;
-    LiveRange* LR = P.second;
-    // Extend the LiveRange if NewIdx is past the end.
-    if (NewIdx > LR->end) {
-      // Move kill flags if OldIdx was not originally the end
-      // (otherwise LR->end points to an invalid slot).
-      if (LR->end.getRegSlot() != OldIdx.getRegSlot()) {
-        assert(LR->end > OldIdx && "LiveRange does not cover original slot");
-        moveKillFlags(LI->reg, LR->end, NewIdx);
-      }
-      LR->end = NewIdx.getRegSlot(LR->end.isEarlyClobber());
-    }
-  }
-
-  void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) {
-    bool GoingUp = NewIdx < OldIdx;
-
-    if (GoingUp) {
-      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
-           EI != EE; ++EI)
-        moveEnteringUpFrom(OldIdx, *EI);
-    } else {
-      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
-           EI != EE; ++EI)
-        moveEnteringDownFrom(OldIdx, *EI);
-    }
-  }
-
-  void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) {
-    LiveInterval* LI = P.first;
-    LiveRange* LR = P.second;
-    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
-           LR->end <= OldIdx.getDeadSlot() &&
-           "Range should be internal to OldIdx.");
-    LiveRange Tmp(*LR);
-    Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber());
-    Tmp.valno->def = Tmp.start;
-    Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot();
-    LI->removeRange(*LR);
-    LI->addRange(Tmp);
-  }
-
-  void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) {
-    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
-         II != IE; ++II)
-      moveInternalFrom(OldIdx, *II);
-  }
-
-  void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) {
-    LiveRange* LR = P.second;
-    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
-           "Range should start in OldIdx.");
-    assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx.");
-    SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber());
-    LR->start = NewStart;
-    LR->valno->def = NewStart;
-  }
-
-  void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) {
-    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
-         EI != EE; ++EI)
-      moveExitingFrom(OldIdx, *EI);
-  }
-
-  void moveEnteringUpFromInto(SlotIndex OldIdx, IntRangePair& P,
-                              BundleRanges& BR) {
-    LiveInterval* LI = P.first;
-    LiveRange* LR = P.second;
-    bool LiveThrough = LR->end > OldIdx.getRegSlot();
-    if (LiveThrough) {
-      assert((LR->start < NewIdx || BR[LI->reg].Def == LR) &&
-             "Def in bundle should be def range.");
-      assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
-             "If bundle has use for this reg it should be LR.");
-      BR[LI->reg].Use = LR;
-      return;
-    }
-
-    SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
-    moveKillFlags(LI->reg, OldIdx, LastUse);
-
-    if (LR->start < NewIdx) {
-      // Becoming a new entering range.
-      assert(BR[LI->reg].Dead == 0 && BR[LI->reg].Def == 0 &&
-             "Bundle shouldn't be re-defining reg mid-range.");
-      assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
-             "Bundle shouldn't have different use range for same reg.");
-      LR->end = LastUse.getRegSlot();
-      BR[LI->reg].Use = LR;
-    } else {
-      // Becoming a new Dead-def.
-      assert(LR->start == NewIdx.getRegSlot(LR->start.isEarlyClobber()) &&
-             "Live range starting at unexpected slot.");
-      assert(BR[LI->reg].Def == LR && "Reg should have def range.");
-      assert(BR[LI->reg].Dead == 0 &&
-             "Can't have def and dead def of same reg in a bundle.");
-      LR->end = LastUse.getDeadSlot();
-      BR[LI->reg].Dead = BR[LI->reg].Def;
-      BR[LI->reg].Def = 0;
-    }
-  }
-
-  void moveEnteringDownFromInto(SlotIndex OldIdx, IntRangePair& P,
-                                BundleRanges& BR) {
-    LiveInterval* LI = P.first;
-    LiveRange* LR = P.second;
-    if (NewIdx > LR->end) {
-      // Range extended to bundle. Add to bundle uses.
-      // Note: Currently adds kill flags to bundle start.
-      assert(BR[LI->reg].Use == 0 &&
-             "Bundle already has use range for reg.");
-      moveKillFlags(LI->reg, LR->end, NewIdx);
-      LR->end = NewIdx.getRegSlot();
-      BR[LI->reg].Use = LR;
-    } else {
-      assert(BR[LI->reg].Use != 0 &&
-             "Bundle should already have a use range for reg.");
-    }
-  }
-
-  void moveAllEnteringFromInto(SlotIndex OldIdx, RangeSet& Entering,
-                               BundleRanges& BR) {
-    bool GoingUp = NewIdx < OldIdx;
-
-    if (GoingUp) {
-      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
-           EI != EE; ++EI)
-        moveEnteringUpFromInto(OldIdx, *EI, BR);
-    } else {
-      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
-           EI != EE; ++EI)
-        moveEnteringDownFromInto(OldIdx, *EI, BR);
-    }
-  }
-
-  void moveInternalFromInto(SlotIndex OldIdx, IntRangePair& P,
-                            BundleRanges& BR) {
-    // TODO: Sane rules for moving ranges into bundles.
-  }
-
-  void moveAllInternalFromInto(SlotIndex OldIdx, RangeSet& Internal,
-                               BundleRanges& BR) {
-    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
-         II != IE; ++II)
-      moveInternalFromInto(OldIdx, *II, BR);
-  }
-
-  void moveExitingFromInto(SlotIndex OldIdx, IntRangePair& P,
-                           BundleRanges& BR) {
-    LiveInterval* LI = P.first;
-    LiveRange* LR = P.second;
-
-    assert(LR->start.isRegister() &&
-           "Don't know how to merge exiting ECs into bundles yet.");
-
-    if (LR->end > NewIdx.getDeadSlot()) {
-      // This range is becoming an exiting range on the bundle.
-      // If there was an old dead-def of this reg, delete it.
-      if (BR[LI->reg].Dead != 0) {
-        LI->removeRange(*BR[LI->reg].Dead);
-        BR[LI->reg].Dead = 0;
-      }
-      assert(BR[LI->reg].Def == 0 &&
-             "Can't have two defs for the same variable exiting a bundle.");
-      LR->start = NewIdx.getRegSlot();
-      LR->valno->def = LR->start;
-      BR[LI->reg].Def = LR;
-    } else {
-      // This range is becoming internal to the bundle.
-      assert(LR->end == NewIdx.getRegSlot() &&
-             "Can't bundle def whose kill is before the bundle");
-      if (BR[LI->reg].Dead || BR[LI->reg].Def) {
-        // Already have a def for this. Just delete range.
-        LI->removeRange(*LR);
-      } else {
-        // Make range dead, record.
-        LR->end = NewIdx.getDeadSlot();
-        BR[LI->reg].Dead = LR;
-        assert(BR[LI->reg].Use == LR &&
-               "Range becoming dead should currently be use.");
-      }
-      // In both cases the range is no longer a use on the bundle.
-      BR[LI->reg].Use = 0;
-    }
-  }
-
-  void moveAllExitingFromInto(SlotIndex OldIdx, RangeSet& Exiting,
-                              BundleRanges& BR) {
-    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
-         EI != EE; ++EI)
-      moveExitingFromInto(OldIdx, *EI, BR);
-  }
-
 };
 
 void LiveIntervals::handleMove(MachineInstr* MI) {
+  assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
   SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
   Indexes->removeMachineInstrFromMaps(MI);
-  SlotIndex NewIndex = MI->isInsideBundle() ?
-    Indexes->getInstructionIndex(MI) :
-    Indexes->insertMachineInstrInMaps(MI);
+  SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
   assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
          OldIndex < getMBBEndIdx(MI->getParent()) &&
          "Cannot handle moves across basic block boundaries.");
-  assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
 
-  HMEditor HME(*this, *MRI, *TRI, NewIndex);
-  HME.moveAllRangesFrom(MI, OldIndex);
+  HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex);
+  HME.updateAllRanges(MI);
 }
 
 void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
                                          MachineInstr* BundleStart) {
+  SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
   SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
-  HMEditor HME(*this, *MRI, *TRI, NewIndex);
-  HME.moveAllRangesInto(MI, BundleStart);
+  HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex);
+  HME.updateAllRanges(MI);
 }
test/CodeGen/X86/handle-move.ll (new file, 73 lines)
@@ -0,0 +1,73 @@
+; RUN: llc -march=x86-64 -mcpu=core2 -fast-isel -enable-misched -misched=shuffle -misched-bottomup -verify-machineinstrs < %s
+; RUN: llc -march=x86-64 -mcpu=core2 -fast-isel -enable-misched -misched=shuffle -misched-topdown -verify-machineinstrs < %s
+;
+; Test the LiveIntervals::handleMove() function.
+;
+; Moving the DIV32r instruction exercises the regunit update code because
+; %EDX has a live range into the function and is used by the DIV32r.
+;
+; Here sinking a kill + dead def:
+; 144B -> 180B: DIV32r %vreg4, %EAX<imp-def>, %EDX<imp-def,dead>, %EFLAGS<imp-def,dead>, %EAX<imp-use,kill>, %EDX<imp-use>
+;   %vreg4: [48r,144r:0)  0@48r
+;    -->    [48r,180r:0)  0@48r
+;   DH:     [0B,16r:0)[128r,144r:2)[144r,144d:1)  0@0B-phi 1@144r 2@128r
+;    -->    [0B,16r:0)[128r,180r:2)[180r,180d:1)  0@0B-phi 1@180r 2@128r
+;   DL:     [0B,16r:0)[128r,144r:2)[144r,144d:1)  0@0B-phi 1@144r 2@128r
+;    -->    [0B,16r:0)[128r,180r:2)[180r,180d:1)  0@0B-phi 1@180r 2@128r
+;
+define i32 @f1(i32 %a, i32 %b, i32 %c, i32 %d) nounwind uwtable readnone ssp {
+entry:
+  %y = add i32 %c, 1
+  %x = udiv i32 %b, %a
+  %add = add nsw i32 %y, %x
+  ret i32 %add
+}
+
+; Same as above, but moving a kill + live def:
+; 144B -> 180B: DIV32r %vreg4, %EAX<imp-def,dead>, %EDX<imp-def>, %EFLAGS<imp-def,dead>, %EAX<imp-use,kill>, %EDX<imp-use>
+;   %vreg4: [48r,144r:0)  0@48r
+;    -->    [48r,180r:0)  0@48r
+;   DH:     [0B,16r:0)[128r,144r:2)[144r,184r:1)  0@0B-phi 1@144r 2@128r
+;    -->    [0B,16r:0)[128r,180r:2)[180r,184r:1)  0@0B-phi 1@180r 2@128r
+;   DL:     [0B,16r:0)[128r,144r:2)[144r,184r:1)  0@0B-phi 1@144r 2@128r
+;    -->    [0B,16r:0)[128r,180r:2)[180r,184r:1)  0@0B-phi 1@180r 2@128r
+;
+define i32 @f2(i32 %a, i32 %b, i32 %c, i32 %d) nounwind uwtable readnone ssp {
+entry:
+  %y = sub i32 %c, %d
+  %x = urem i32 %b, %a
+  %add = add nsw i32 %x, %y
+  ret i32 %add
+}
+
+; Moving a use below the existing kill (%vreg5):
+; Moving a tied virtual register def (%vreg11):
+;
+; 96B -> 120B: %vreg11<def,tied1> = SUB32rr %vreg11<tied0>, %vreg5
+;   %vreg11: [80r,96r:1)[96r,144r:0)  0@96r 1@80r
+;    -->     [80r,120r:1)[120r,144r:0)  0@120r 1@80r
+;   %vreg5:  [16r,112r:0)  0@16r
+;    -->     [16r,120r:0)  0@16r
+;
+define i32 @f3(i32 %a, i32 %b, i32 %c, i32 %d) nounwind uwtable readnone ssp {
+entry:
+  %y = sub i32 %a, %b
+  %x = add i32 %a, %b
+  %r = mul i32 %x, %y
+  ret i32 %r
+}
+
+; Move EFLAGS dead def across another def:
+; handleMove 208B -> 36B: %EDX<def> = MOV32r0 %EFLAGS<imp-def,dead>
+;   EFLAGS: [20r,20d:4)[160r,160d:3)[208r,208d:0)[224r,224d:1)[272r,272d:2)[304r,304d:5)  0@208r 1@224r 2@272r 3@160r 4@20r 5@304r
+;    -->    [20r,20d:4)[36r,36d:0)[160r,160d:3)[224r,224d:1)[272r,272d:2)[304r,304d:5)  0@36r 1@224r 2@272r 3@160r 4@20r 5@304r
+;
+define i32 @f4(i32 %a, i32 %b, i32 %c, i32 %d) nounwind uwtable readnone ssp {
+entry:
+  %x = sub i32 %a, %b
+  %y = sub i32 %b, %c
+  %z = sub i32 %c, %d
+  %r1 = udiv i32 %x, %y
+  %r2 = mul i32 %z, %r1
+  ret i32 %r2
+}