VirtRegRewriter spring cleaning. No functional change.
Move methods out of line and M-x whitespace-cleanup.
Promote common method arguments to member variables.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@98207 91177308-0d34-0410-b5e6-96231b3b80d8
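The pattern itself is simple: state that was threaded through every private method (the current basic block, the VirtRegMap, the register info) becomes member variables assigned once before the helpers run, so `VRM.` becomes `VRM->`, `MBB.` becomes `MBB->`, and `RegInfo` becomes the `MRI` member throughout the diff below. A minimal self-contained C++ sketch of the refactoring (the types and names here are illustrative stand-ins, not the actual LLVM sources):

struct Block {};   // stand-in for MachineBasicBlock
struct RegMap {};  // stand-in for VirtRegMap

// Before: each helper receives the same "ambient" state as arguments.
class RewriterBefore {
public:
  bool run(Block &BB, RegMap &Map) { return optimize(BB, Map); }
private:
  bool optimize(Block &BB, RegMap &Map) { return spill(BB, Map, 0) == 0; }
  int spill(Block &BB, RegMap &Map, int Slot) {
    (void)BB; (void)Map;           // would use both here
    return Slot;
  }
};

// After: the state lives in members; helper signatures keep only the
// arguments that actually vary per call site.
class RewriterAfter {
  Block *MBB;                      // basic block currently being processed
  RegMap *VRM;                     // set once per run
public:
  bool run(Block &BB, RegMap &Map) {
    MBB = &BB;                     // promote common arguments to members
    VRM = &Map;
    return optimize();
  }
private:
  bool optimize() { return spill(0) == 0; }
  int spill(int Slot) {
    (void)MBB; (void)VRM;          // members replace the parameters
    return Slot;
  }
};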
@@ -1037,33 +1037,80 @@ void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
 }
 
 namespace {
 
 struct RefSorter {
   bool operator()(const std::pair<MachineInstr*, int> &A,
                   const std::pair<MachineInstr*, int> &B) {
     return A.second < B.second;
   }
 };
 }
 
 // ***************************** //
 // Local Spiller Implementation //
 // ***************************** //
 
 namespace {
 
 class LocalRewriter : public VirtRegRewriter {
-  MachineRegisterInfo *RegInfo;
+  MachineRegisterInfo *MRI;
   const TargetRegisterInfo *TRI;
   const TargetInstrInfo *TII;
+  VirtRegMap *VRM;
   BitVector AllocatableRegs;
   DenseMap<MachineInstr*, unsigned> DistanceMap;
+
+  MachineBasicBlock *MBB;       // Basic block currently being processed.
+
 public:
+
+  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
+                            LiveIntervals* LIs);
+
+private:
+
+  bool OptimizeByUnfold2(unsigned VirtReg, int SS,
+                         MachineBasicBlock::iterator &MII,
+                         std::vector<MachineInstr*> &MaybeDeadStores,
+                         AvailableSpills &Spills,
+                         BitVector &RegKills,
+                         std::vector<MachineOperand*> &KillOps);
+
+  bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
+                        std::vector<MachineInstr*> &MaybeDeadStores,
+                        AvailableSpills &Spills,
+                        BitVector &RegKills,
+                        std::vector<MachineOperand*> &KillOps);
+
+  bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
+                           unsigned VirtReg, unsigned SrcReg, int SS,
+                           AvailableSpills &Spills,
+                           BitVector &RegKills,
+                           std::vector<MachineOperand*> &KillOps,
+                           const TargetRegisterInfo *TRI);
+
+  void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
+                           int Idx, unsigned PhysReg, int StackSlot,
+                           const TargetRegisterClass *RC,
+                           bool isAvailable, MachineInstr *&LastStore,
+                           AvailableSpills &Spills,
+                           SmallSet<MachineInstr*, 4> &ReMatDefs,
+                           BitVector &RegKills,
+                           std::vector<MachineOperand*> &KillOps);
+
+  void TransferDeadness(unsigned CurDist,
+                        unsigned Reg, BitVector &RegKills,
+                        std::vector<MachineOperand*> &KillOps);
+
+  void RewriteMBB(LiveIntervals *LIs,
+                  AvailableSpills &Spills, BitVector &RegKills,
+                  std::vector<MachineOperand*> &KillOps);
+};
+}
+
+bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
+                                         LiveIntervals* LIs) {
-  RegInfo = &MF.getRegInfo();
+  MRI = &MF.getRegInfo();
   TRI = MF.getTarget().getRegisterInfo();
   TII = MF.getTarget().getInstrInfo();
+  VRM = &vrm;
   AllocatableRegs = TRI->getAllocatableSet(MF);
   DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
         << MF.getFunction()->getName() << "':\n");
@@ -1092,9 +1139,9 @@ public:
          SmallPtrSet<MachineBasicBlock*,16> >
          DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
        DFI != E; ++DFI) {
-    MachineBasicBlock *MBB = *DFI;
+    MBB = *DFI;
     if (!EarlyVisited.count(MBB))
-      RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
+      RewriteMBB(LIs, Spills, RegKills, KillOps);
 
     // If this MBB is the only predecessor of a successor. Keep the
     // availability information and visit it next.
@@ -1110,7 +1157,7 @@ public:
       MBB = SinglePredSuccs[0];
       if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
         Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
-        RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
+        RewriteMBB(LIs, Spills, RegKills, KillOps);
       }
     }
   } while (MBB);
@@ -1124,10 +1171,10 @@ public:
 
   // Mark unused spill slots.
   MachineFrameInfo *MFI = MF.getFrameInfo();
-  int SS = VRM.getLowSpillSlot();
+  int SS = VRM->getLowSpillSlot();
   if (SS != VirtRegMap::NO_STACK_SLOT)
-    for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
-      if (!VRM.isSpillSlotUsed(SS)) {
+    for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS)
+      if (!VRM->isSpillSlotUsed(SS)) {
         MFI->RemoveStackObject(SS);
         ++NumDSS;
       }
@@ -1135,8 +1182,6 @@ public:
   return true;
 }
 
-private:
-
 /// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
 /// a scratch register is available.
 /// xorq %r12<kill>, %r13
@@ -1148,17 +1193,16 @@ private:
 /// addq %rax, %r12
 /// addq %r13, %r12
 /// movq %r12, -184(%rbp)
-bool OptimizeByUnfold2(unsigned VirtReg, int SS,
-                       MachineBasicBlock &MBB,
+bool LocalRewriter::
+OptimizeByUnfold2(unsigned VirtReg, int SS,
                   MachineBasicBlock::iterator &MII,
                   std::vector<MachineInstr*> &MaybeDeadStores,
                   AvailableSpills &Spills,
                   BitVector &RegKills,
-                  std::vector<MachineOperand*> &KillOps,
-                  VirtRegMap &VRM) {
+                  std::vector<MachineOperand*> &KillOps) {
 
   MachineBasicBlock::iterator NextMII = llvm::next(MII);
-  if (NextMII == MBB.end())
+  if (NextMII == MBB->end())
     return false;
 
   if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
@@ -1166,28 +1210,28 @@ private:
 
   // Now let's see if the last couple of instructions happens to have freed up
   // a register.
-  const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
-  unsigned PhysReg = FindFreeRegister(MII, MBB, RC, TRI, AllocatableRegs);
+  const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+  unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
   if (!PhysReg)
     return false;
 
-  MachineFunction &MF = *MBB.getParent();
+  MachineFunction &MF = *MBB->getParent();
   TRI = MF.getTarget().getRegisterInfo();
   MachineInstr &MI = *MII;
-  if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, VRM))
+  if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
     return false;
 
   // If the next instruction also folds the same SS modref and can be unfoled,
   // then it's worthwhile to issue a load from SS into the free register and
   // then unfold these instructions.
-  if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM))
+  if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
     return false;
 
   // Back-schedule reloads and remats.
-  ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, false, SS, TII, MF);
+  ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);
 
   // Load from SS to the spare physical register.
-  TII->loadRegFromStackSlot(MBB, MII, PhysReg, SS, RC);
+  TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC);
   // This invalidates Phys.
   Spills.ClobberPhysReg(PhysReg);
   // Remember it's available.
@@ -1200,11 +1244,11 @@ private:
     llvm_unreachable("Unable unfold the load / store folding instruction!");
   assert(NewMIs.size() == 1);
   AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
-  VRM.transferRestorePts(&MI, NewMIs[0]);
-  MII = MBB.insert(MII, NewMIs[0]);
+  VRM->transferRestorePts(&MI, NewMIs[0]);
+  MII = MBB->insert(MII, NewMIs[0]);
   InvalidateKills(MI, TRI, RegKills, KillOps);
-  VRM.RemoveMachineInstrFromMaps(&MI);
-  MBB.erase(&MI);
+  VRM->RemoveMachineInstrFromMaps(&MI);
+  MBB->erase(&MI);
   ++NumModRefUnfold;
 
   // Unfold next instructions that fold the same SS.
@@ -1216,21 +1260,21 @@ private:
       llvm_unreachable("Unable unfold the load / store folding instruction!");
     assert(NewMIs.size() == 1);
     AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
-    VRM.transferRestorePts(&NextMI, NewMIs[0]);
-    MBB.insert(NextMII, NewMIs[0]);
+    VRM->transferRestorePts(&NextMI, NewMIs[0]);
+    MBB->insert(NextMII, NewMIs[0]);
     InvalidateKills(NextMI, TRI, RegKills, KillOps);
-    VRM.RemoveMachineInstrFromMaps(&NextMI);
-    MBB.erase(&NextMI);
+    VRM->RemoveMachineInstrFromMaps(&NextMI);
+    MBB->erase(&NextMI);
     ++NumModRefUnfold;
-    if (NextMII == MBB.end())
+    if (NextMII == MBB->end())
       break;
-  } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM));
+  } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));
 
   // Store the value back into SS.
-  TII->storeRegToStackSlot(MBB, NextMII, PhysReg, true, SS, RC);
+  TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC);
   MachineInstr *StoreMI = prior(NextMII);
-  VRM.addSpillSlotUse(SS, StoreMI);
-  VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
+  VRM->addSpillSlotUse(SS, StoreMI);
+  VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
 
   return true;
 }
@@ -1247,21 +1291,20 @@ private:
 /// mov %eax, -32(%ebp)
 /// This enables unfolding optimization for a subsequent instruction which will
 /// also eliminate the newly introduced store instruction.
-bool OptimizeByUnfold(MachineBasicBlock &MBB,
-                      MachineBasicBlock::iterator &MII,
+bool LocalRewriter::
+OptimizeByUnfold(MachineBasicBlock::iterator &MII,
                  std::vector<MachineInstr*> &MaybeDeadStores,
                  AvailableSpills &Spills,
                  BitVector &RegKills,
-                 std::vector<MachineOperand*> &KillOps,
-                 VirtRegMap &VRM) {
-  MachineFunction &MF = *MBB.getParent();
+                 std::vector<MachineOperand*> &KillOps) {
+  MachineFunction &MF = *MBB->getParent();
   MachineInstr &MI = *MII;
   unsigned UnfoldedOpc = 0;
   unsigned UnfoldPR = 0;
   unsigned UnfoldVR = 0;
   int FoldedSS = VirtRegMap::NO_STACK_SLOT;
   VirtRegMap::MI2VirtMapTy::const_iterator I, End;
-  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
+  for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
     // Only transform a MI that folds a single register.
     if (UnfoldedOpc)
       return false;
@@ -1270,11 +1313,11 @@ private:
     // MI2VirtMap be can updated which invalidate the iterator.
     // Increment the iterator first.
    ++I;
-    if (VRM.isAssignedReg(UnfoldVR))
+    if (VRM->isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
-    FoldedSS = VRM.getStackSlot(UnfoldVR);
+    FoldedSS = VRM->getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
@@ -1291,8 +1334,8 @@ private:
     return false;
 
   // Look for other unfolding opportunities.
-  return OptimizeByUnfold2(UnfoldVR, FoldedSS, MBB, MII,
-                           MaybeDeadStores, Spills, RegKills, KillOps, VRM);
+  return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
+                           RegKills, KillOps);
   }
 
   for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
@@ -1302,21 +1345,21 @@ private:
     unsigned VirtReg = MO.getReg();
     if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
       continue;
-    if (VRM.isAssignedReg(VirtReg)) {
-      unsigned PhysReg = VRM.getPhys(VirtReg);
+    if (VRM->isAssignedReg(VirtReg)) {
+      unsigned PhysReg = VRM->getPhys(VirtReg);
       if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
         return false;
-    } else if (VRM.isReMaterialized(VirtReg))
+    } else if (VRM->isReMaterialized(VirtReg))
       continue;
-    int SS = VRM.getStackSlot(VirtReg);
+    int SS = VRM->getStackSlot(VirtReg);
     unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
     if (PhysReg) {
       if (TRI->regsOverlap(PhysReg, UnfoldPR))
         return false;
       continue;
     }
-    if (VRM.hasPhys(VirtReg)) {
-      PhysReg = VRM.getPhys(VirtReg);
+    if (VRM->hasPhys(VirtReg)) {
+      PhysReg = VRM->getPhys(VirtReg);
       if (!TRI->regsOverlap(PhysReg, UnfoldPR))
         continue;
     }
@@ -1337,14 +1380,14 @@ private:
     Ops.push_back(Idx);
     MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
     if (FoldedMI) {
-      VRM.addSpillSlotUse(SS, FoldedMI);
-      if (!VRM.hasPhys(UnfoldVR))
-        VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
-      VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
-      MII = MBB.insert(MII, FoldedMI);
+      VRM->addSpillSlotUse(SS, FoldedMI);
+      if (!VRM->hasPhys(UnfoldVR))
+        VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
+      VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
+      MII = MBB->insert(MII, FoldedMI);
       InvalidateKills(MI, TRI, RegKills, KillOps);
-      VRM.RemoveMachineInstrFromMaps(&MI);
-      MBB.erase(&MI);
+      VRM->RemoveMachineInstrFromMaps(&MI);
+      MBB->erase(&MI);
       MF.DeleteMachineInstr(NewMI);
       return true;
     }
@@ -1390,24 +1433,23 @@ private:
 /// If op is commutable and r2 is killed, then we can xform these to
 /// r2 = op r2, fi#1
 /// store r2, fi#1
-bool CommuteToFoldReload(MachineBasicBlock &MBB,
-                         MachineBasicBlock::iterator &MII,
+bool LocalRewriter::
+CommuteToFoldReload(MachineBasicBlock::iterator &MII,
                     unsigned VirtReg, unsigned SrcReg, int SS,
                     AvailableSpills &Spills,
                     BitVector &RegKills,
                     std::vector<MachineOperand*> &KillOps,
-                    const TargetRegisterInfo *TRI,
-                    VirtRegMap &VRM) {
-  if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
+                    const TargetRegisterInfo *TRI) {
+  if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
     return false;
 
-  MachineFunction &MF = *MBB.getParent();
+  MachineFunction &MF = *MBB->getParent();
   MachineInstr &MI = *MII;
   MachineBasicBlock::iterator DefMII = prior(MII);
   MachineInstr *DefMI = DefMII;
   const TargetInstrDesc &TID = DefMI->getDesc();
   unsigned NewDstIdx;
-  if (DefMII != MBB.begin() &&
+  if (DefMII != MBB->begin() &&
       TID.isCommutable() &&
       CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
     MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
@@ -1440,27 +1482,27 @@ private:
   if (!FoldedMI)
     return false;
 
-  VRM.addSpillSlotUse(SS, FoldedMI);
-  VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
+  VRM->addSpillSlotUse(SS, FoldedMI);
+  VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
   // Insert new def MI and spill MI.
-  const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
-  TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
+  const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+  TII->storeRegToStackSlot(*MBB, &MI, NewReg, true, SS, RC);
   MII = prior(MII);
   MachineInstr *StoreMI = MII;
-  VRM.addSpillSlotUse(SS, StoreMI);
-  VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
-  MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.
+  VRM->addSpillSlotUse(SS, StoreMI);
+  VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
+  MII = MBB->insert(MII, FoldedMI);  // Update MII to backtrack.
 
   // Delete all 3 old instructions.
   InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
-  VRM.RemoveMachineInstrFromMaps(ReloadMI);
-  MBB.erase(ReloadMI);
+  VRM->RemoveMachineInstrFromMaps(ReloadMI);
+  MBB->erase(ReloadMI);
   InvalidateKills(*DefMI, TRI, RegKills, KillOps);
-  VRM.RemoveMachineInstrFromMaps(DefMI);
-  MBB.erase(DefMI);
+  VRM->RemoveMachineInstrFromMaps(DefMI);
+  MBB->erase(DefMI);
   InvalidateKills(MI, TRI, RegKills, KillOps);
-  VRM.RemoveMachineInstrFromMaps(&MI);
-  MBB.erase(&MI);
+  VRM->RemoveMachineInstrFromMaps(&MI);
+  MBB->erase(&MI);
 
   // If NewReg was previously holding value of some SS, it's now clobbered.
   // This has to be done now because it's a physical register. When this
@@ -1476,21 +1518,20 @@ private:
 
 /// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
 /// the last store to the same slot is now dead. If so, remove the last store.
-void SpillRegToStackSlot(MachineBasicBlock &MBB,
-                         MachineBasicBlock::iterator &MII,
+void LocalRewriter::
+SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
                     int Idx, unsigned PhysReg, int StackSlot,
                     const TargetRegisterClass *RC,
                     bool isAvailable, MachineInstr *&LastStore,
                     AvailableSpills &Spills,
                     SmallSet<MachineInstr*, 4> &ReMatDefs,
                     BitVector &RegKills,
-                    std::vector<MachineOperand*> &KillOps,
-                    VirtRegMap &VRM) {
+                    std::vector<MachineOperand*> &KillOps) {
 
   MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
-  TII->storeRegToStackSlot(MBB, llvm::next(MII), PhysReg, true, StackSlot, RC);
+  TII->storeRegToStackSlot(*MBB, llvm::next(MII), PhysReg, true, StackSlot, RC);
   MachineInstr *StoreMI = prior(oldNextMII);
-  VRM.addSpillSlotUse(StackSlot, StoreMI);
+  VRM->addSpillSlotUse(StackSlot, StoreMI);
   DEBUG(dbgs() << "Store:\t" << *StoreMI);
 
   // If there is a dead store to this stack slot, nuke it now.
@@ -1500,11 +1541,11 @@ private:
     SmallVector<unsigned, 2> KillRegs;
     InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
     MachineBasicBlock::iterator PrevMII = LastStore;
-    bool CheckDef = PrevMII != MBB.begin();
+    bool CheckDef = PrevMII != MBB->begin();
     if (CheckDef)
       --PrevMII;
-    VRM.RemoveMachineInstrFromMaps(LastStore);
-    MBB.erase(LastStore);
+    VRM->RemoveMachineInstrFromMaps(LastStore);
+    MBB->erase(LastStore);
     if (CheckDef) {
       // Look at defs of killed registers on the store. Mark the defs
       // as dead since the store has been deleted and they aren't
@@ -1515,8 +1556,8 @@ private:
         MachineInstr *DeadDef = PrevMII;
         if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
           // FIXME: This assumes a remat def does not have side effects.
-          VRM.RemoveMachineInstrFromMaps(DeadDef);
-          MBB.erase(DeadDef);
+          VRM->RemoveMachineInstrFromMaps(DeadDef);
+          MBB->erase(DeadDef);
           ++NumDRM;
         }
       }
@@ -1562,14 +1603,14 @@ private:
 
 /// TransferDeadness - A identity copy definition is dead and it's being
 /// removed. Find the last def or use and mark it as dead / kill.
-void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
+void LocalRewriter::
+TransferDeadness(unsigned CurDist,
                  unsigned Reg, BitVector &RegKills,
-                 std::vector<MachineOperand*> &KillOps,
-                 VirtRegMap &VRM) {
+                 std::vector<MachineOperand*> &KillOps) {
   SmallPtrSet<MachineInstr*, 4> Seens;
   SmallVector<std::pair<MachineInstr*, int>,8> Refs;
-  for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
-         RE = RegInfo->reg_end(); RI != RE; ++RI) {
+  for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
+         RE = MRI->reg_end(); RI != RE; ++RI) {
     MachineInstr *UDMI = &*RI;
     if (UDMI->getParent() != MBB)
       continue;
@@ -1605,7 +1646,7 @@ private:
         LastUD->setIsDead();
         break;
       }
-      VRM.RemoveMachineInstrFromMaps(LastUDMI);
+      VRM->RemoveMachineInstrFromMaps(LastUDMI);
       MBB->erase(LastUDMI);
     } else {
       LastUD->setIsKill();
@@ -1618,15 +1659,15 @@ private:
 
 /// rewriteMBB - Keep track of which spills are available even after the
 /// register allocator is done with them. If possible, avid reloading vregs.
-void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
-                LiveIntervals *LIs,
+void
+LocalRewriter::RewriteMBB(LiveIntervals *LIs,
                 AvailableSpills &Spills, BitVector &RegKills,
                 std::vector<MachineOperand*> &KillOps) {
 
   DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
-        << MBB.getName() << "':\n");
+        << MBB->getName() << "':\n");
 
-  MachineFunction &MF = *MBB.getParent();
+  MachineFunction &MF = *MBB->getParent();
 
   // MaybeDeadStores - When we need to write a value back into a stack slot,
   // keep track of the inserted store. If the stack slot value is never read
@@ -1648,46 +1689,45 @@ private:
 
   unsigned Dist = 0;
   DistanceMap.clear();
-  for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
+  for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
        MII != E; ) {
     MachineBasicBlock::iterator NextMII = llvm::next(MII);
 
    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    bool Erased = false;
    bool BackTracked = false;
-    if (OptimizeByUnfold(MBB, MII,
-                         MaybeDeadStores, Spills, RegKills, KillOps, VRM))
+    if (OptimizeByUnfold(MII, MaybeDeadStores, Spills, RegKills, KillOps))
      NextMII = llvm::next(MII);
 
    MachineInstr &MI = *MII;
 
-    if (VRM.hasEmergencySpills(&MI)) {
+    if (VRM->hasEmergencySpills(&MI)) {
      // Spill physical register(s) in the rare case the allocator has run out
      // of registers to allocate.
      SmallSet<int, 4> UsedSS;
-      std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
+      std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(&MI);
      for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
        unsigned PhysReg = EmSpills[i];
        const TargetRegisterClass *RC =
          TRI->getPhysicalRegisterRegClass(PhysReg);
        assert(RC && "Unable to determine register class!");
-        int SS = VRM.getEmergencySpillSlot(RC);
+        int SS = VRM->getEmergencySpillSlot(RC);
        if (UsedSS.count(SS))
          llvm_unreachable("Need to spill more than one physical registers!");
        UsedSS.insert(SS);
-        TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
+        TII->storeRegToStackSlot(*MBB, MII, PhysReg, true, SS, RC);
        MachineInstr *StoreMI = prior(MII);
-        VRM.addSpillSlotUse(SS, StoreMI);
+        VRM->addSpillSlotUse(SS, StoreMI);
 
        // Back-schedule reloads and remats.
        MachineBasicBlock::iterator InsertLoc =
-          ComputeReloadLoc(llvm::next(MII), MBB.begin(), PhysReg, TRI, false,
+          ComputeReloadLoc(llvm::next(MII), MBB->begin(), PhysReg, TRI, false,
                           SS, TII, MF);
 
-        TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SS, RC);
+        TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SS, RC);
 
        MachineInstr *LoadMI = prior(InsertLoc);
-        VRM.addSpillSlotUse(SS, LoadMI);
+        VRM->addSpillSlotUse(SS, LoadMI);
        ++NumPSpills;
        DistanceMap.insert(std::make_pair(LoadMI, Dist++));
      }
@@ -1695,14 +1735,14 @@ private:
    }
 
    // Insert restores here if asked to.
-    if (VRM.isRestorePt(&MI)) {
-      std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
+    if (VRM->isRestorePt(&MI)) {
+      std::vector<unsigned> &RestoreRegs = VRM->getRestorePtRestores(&MI);
      for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
        unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
-        if (!VRM.getPreSplitReg(VirtReg))
+        if (!VRM->getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
-        unsigned Phys = VRM.getPhys(VirtReg);
-        RegInfo->setPhysRegUsed(Phys);
+        unsigned Phys = VRM->getPhys(VirtReg);
+        MRI->setPhysRegUsed(Phys);
 
        // Check if the value being restored if available. If so, it must be
        // from a predecessor BB that fallthrough into this BB. We do not
@@ -1714,10 +1754,10 @@ private:
        // ... # r1 not clobbered
        // ...
        // = load fi#1
-        bool DoReMat = VRM.isReMaterialized(VirtReg);
+        bool DoReMat = VRM->isReMaterialized(VirtReg);
        int SSorRMId = DoReMat
-          ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
-        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
+          ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
+        const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
        unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
        if (InReg == Phys) {
          // If the value is already available in the expected register, save
@@ -1749,10 +1789,10 @@ private:
 
          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
-            ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat,
+            ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat,
                             SSorRMId, TII, MF);
 
-          TII->copyRegToReg(MBB, InsertLoc, Phys, InReg, RC, RC);
+          TII->copyRegToReg(*MBB, InsertLoc, Phys, InReg, RC, RC);
 
          // This invalidates Phys.
          Spills.ClobberPhysReg(Phys);
@@ -1773,16 +1813,16 @@ private:
 
          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
-            ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat,
+            ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat,
                             SSorRMId, TII, MF);
 
-          if (VRM.isReMaterialized(VirtReg)) {
-            ReMaterialize(MBB, InsertLoc, Phys, VirtReg, TII, TRI, VRM);
+          if (VRM->isReMaterialized(VirtReg)) {
+            ReMaterialize(*MBB, InsertLoc, Phys, VirtReg, TII, TRI, *VRM);
          } else {
-            const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
-            TII->loadRegFromStackSlot(MBB, InsertLoc, Phys, SSorRMId, RC);
+            const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+            TII->loadRegFromStackSlot(*MBB, InsertLoc, Phys, SSorRMId, RC);
            MachineInstr *LoadMI = prior(InsertLoc);
-            VRM.addSpillSlotUse(SSorRMId, LoadMI);
+            VRM->addSpillSlotUse(SSorRMId, LoadMI);
            ++NumLoads;
            DistanceMap.insert(std::make_pair(LoadMI, Dist++));
          }
@@ -1798,23 +1838,24 @@ private:
    }
 
    // Insert spills here if asked to.
-    if (VRM.isSpillPt(&MI)) {
+    if (VRM->isSpillPt(&MI)) {
      std::vector<std::pair<unsigned,bool> > &SpillRegs =
-        VRM.getSpillPtSpills(&MI);
+        VRM->getSpillPtSpills(&MI);
      for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
        unsigned VirtReg = SpillRegs[i].first;
        bool isKill = SpillRegs[i].second;
-        if (!VRM.getPreSplitReg(VirtReg))
+        if (!VRM->getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
-        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
-        unsigned Phys = VRM.getPhys(VirtReg);
-        int StackSlot = VRM.getStackSlot(VirtReg);
+        const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
+        unsigned Phys = VRM->getPhys(VirtReg);
+        int StackSlot = VRM->getStackSlot(VirtReg);
        MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
-        TII->storeRegToStackSlot(MBB, llvm::next(MII), Phys, isKill, StackSlot, RC);
+        TII->storeRegToStackSlot(*MBB, llvm::next(MII), Phys, isKill, StackSlot,
+                                 RC);
        MachineInstr *StoreMI = prior(oldNextMII);
-        VRM.addSpillSlotUse(StackSlot, StoreMI);
+        VRM->addSpillSlotUse(StackSlot, StoreMI);
        DEBUG(dbgs() << "Store:\t" << *StoreMI);
-        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
+        VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      }
      NextMII = llvm::next(MII);
    }
@@ -1832,7 +1873,7 @@ private:
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
        // Ignore physregs for spilling, but remember that it is used by this
        // function.
-        RegInfo->setPhysRegUsed(VirtReg);
+        MRI->setPhysRegUsed(VirtReg);
        continue;
      }
 
@@ -1857,16 +1898,16 @@ private:
             "Not a virtual register?");
 
      unsigned SubIdx = MI.getOperand(i).getSubReg();
-      if (VRM.isAssignedReg(VirtReg)) {
+      if (VRM->isAssignedReg(VirtReg)) {
        // This virtual register was assigned a physreg!
-        unsigned Phys = VRM.getPhys(VirtReg);
-        RegInfo->setPhysRegUsed(Phys);
+        unsigned Phys = VRM->getPhys(VirtReg);
+        MRI->setPhysRegUsed(Phys);
        if (MI.getOperand(i).isDef())
          ReusedOperands.markClobbered(Phys);
        substitutePhysReg(MI.getOperand(i), Phys, *TRI);
-        if (VRM.isImplicitlyDefined(VirtReg))
+        if (VRM->isImplicitlyDefined(VirtReg))
          // FIXME: Is this needed?
-          BuildMI(MBB, &MI, MI.getDebugLoc(),
+          BuildMI(*MBB, &MI, MI.getDebugLoc(),
                  TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
        continue;
      }
@@ -1885,9 +1926,9 @@ private:
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
-      bool DoReMat = VRM.isReMaterialized(VirtReg);
+      bool DoReMat = VRM->isReMaterialized(VirtReg);
      int SSorRMId = DoReMat
-        ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
+        ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
      int ReuseSlot = SSorRMId;
 
      // Check to see if this stack slot is available.
@@ -1904,7 +1945,7 @@ private:
      // fi#1 is available in EDI, but it cannot be reused because it's not in
      // the right register file.
      if (PhysReg && !AvoidReload && (SubIdx || MI.isExtractSubreg())) {
-        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
+        const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
        if (!RC->contains(PhysReg))
          PhysReg = 0;
      }
@@ -1935,7 +1976,7 @@ private:
          DEBUG(dbgs() << " from physreg "
                       << TRI->getName(PhysReg) << " for vreg"
                       << VirtReg <<" instead of reloading into physreg "
-                       << TRI->getName(VRM.getPhys(VirtReg)) << '\n');
+                       << TRI->getName(VRM->getPhys(VirtReg)) << '\n');
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);
@@ -1955,7 +1996,7 @@ private:
          // case, we actually insert a reload for V1 in R1, ensuring that
          // we can get at R0 or its alias.
          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
-                                  VRM.getPhys(VirtReg), VirtReg);
+                                  VRM->getPhys(VirtReg), VirtReg);
          if (isTied)
            // Only mark it clobbered if this is a use&def operand.
            ReusedOperands.markClobbered(PhysReg);
@@ -1994,7 +2035,7 @@ private:
        // To avoid this problem, and to avoid doing a load right after a store,
        // we emit a copy from PhysReg into the designated register for this
        // operand.
-        unsigned DesignatedReg = VRM.getPhys(VirtReg);
+        unsigned DesignatedReg = VRM->getPhys(VirtReg);
        assert(DesignatedReg && "Must map virtreg to physreg!");
 
        // Note that, if we reused a register for a previous operand, the
@@ -2002,9 +2043,9 @@ private:
        // available. If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
-          DesignatedReg = ReusedOperands.GetRegForReload(VirtReg,
-                                                         DesignatedReg, &MI,
-                            Spills, MaybeDeadStores, RegKills, KillOps, VRM);
+          DesignatedReg = ReusedOperands.
+            GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
+                            MaybeDeadStores, RegKills, KillOps, *VRM);
 
        // If the mapped designated register is actually the physreg we have
        // incoming, we don't need to inserted a dead copy.
@@ -2026,16 +2067,16 @@ private:
          continue;
        }
 
-        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
-        RegInfo->setPhysRegUsed(DesignatedReg);
+        const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+        MRI->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
 
        // Back-schedule reloads and remats.
        MachineBasicBlock::iterator InsertLoc =
-          ComputeReloadLoc(&MI, MBB.begin(), PhysReg, TRI, DoReMat,
+          ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
                           SSorRMId, TII, MF);
 
-        TII->copyRegToReg(MBB, InsertLoc, DesignatedReg, PhysReg, RC, RC);
+        TII->copyRegToReg(*MBB, InsertLoc, DesignatedReg, PhysReg, RC, RC);
 
        MachineInstr *CopyMI = prior(InsertLoc);
        CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
@@ -2055,7 +2096,7 @@ private:
      } // if (PhysReg)
 
      // Otherwise, reload it and remember that we have it.
-      PhysReg = VRM.getPhys(VirtReg);
+      PhysReg = VRM->getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");
 
      // Note that, if we reused a register for a previous operand, the
@@ -2064,25 +2105,25 @@ private:
      // reuser.
      if (ReusedOperands.hasReuses())
        PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
-                    Spills, MaybeDeadStores, RegKills, KillOps, VRM);
+                    Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
 
-      RegInfo->setPhysRegUsed(PhysReg);
+      MRI->setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
      if (AvoidReload)
        ++NumAvoided;
      else {
        // Back-schedule reloads and remats.
        MachineBasicBlock::iterator InsertLoc =
-          ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, DoReMat,
+          ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, DoReMat,
                           SSorRMId, TII, MF);
 
        if (DoReMat) {
-          ReMaterialize(MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, VRM);
+          ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
        } else {
-          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
-          TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SSorRMId, RC);
+          const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+          TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC);
          MachineInstr *LoadMI = prior(InsertLoc);
-          VRM.addSpillSlotUse(SSorRMId, LoadMI);
+          VRM->addSpillSlotUse(SSorRMId, LoadMI);
          ++NumLoads;
          DistanceMap.insert(std::make_pair(LoadMI, Dist++));
        }
@@ -2118,8 +2159,8 @@ private:
        if (DeadStore) {
          DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
          InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
-          VRM.RemoveMachineInstrFromMaps(DeadStore);
-          MBB.erase(DeadStore);
+          VRM->RemoveMachineInstrFromMaps(DeadStore);
+          MBB->erase(DeadStore);
          MaybeDeadStores[PDSSlot] = NULL;
          ++NumDSE;
        }
@@ -2133,7 +2174,7 @@ private:
    // physical registers that may contain the value of the spilled virtual
    // register
    SmallSet<int, 2> FoldedSS;
-    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
+    for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DEBUG(dbgs() << "Folded vreg: " << VirtReg << " MR: " << MR);
@@ -2141,7 +2182,7 @@ private:
      // MI2VirtMap be can updated which invalidate the iterator.
      // Increment the iterator first.
      ++I;
-      int SS = VRM.getStackSlot(VirtReg);
+      int SS = VRM->getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
@@ -2158,8 +2199,8 @@ private:
        if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
          DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
          if (DestReg != InReg) {
-            const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
-            TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
+            const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
+            TII->copyRegToReg(*MBB, &MI, DestReg, InReg, RC, RC);
            MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
            unsigned SubIdx = DefMO->getSubReg();
            // Revisit the copy so we make sure to notice the effects of the
@@ -2187,8 +2228,8 @@ private:
          }
 
          InvalidateKills(MI, TRI, RegKills, KillOps);
-          VRM.RemoveMachineInstrFromMaps(&MI);
-          MBB.erase(&MI);
+          VRM->RemoveMachineInstrFromMaps(&MI);
+          MBB->erase(&MI);
          Erased = true;
          goto ProcessNextInst;
        }
@@ -2197,10 +2238,10 @@ private:
        SmallVector<MachineInstr*, 4> NewMIs;
        if (PhysReg &&
            TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
-          MBB.insert(MII, NewMIs[0]);
+          MBB->insert(MII, NewMIs[0]);
          InvalidateKills(MI, TRI, RegKills, KillOps);
-          VRM.RemoveMachineInstrFromMaps(&MI);
-          MBB.erase(&MI);
+          VRM->RemoveMachineInstrFromMaps(&MI);
+          MBB->erase(&MI);
          Erased = true;
          --NextMII;  // backtrack to the unfolded instruction.
          BackTracked = true;
@@ -2231,13 +2272,13 @@ private:
          // super-register is needed below.
          if (KillOpnd && !KillOpnd->getSubReg() &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
-            MBB.insert(MII, NewMIs[0]);
+            MBB->insert(MII, NewMIs[0]);
            NewStore = NewMIs[1];
-            MBB.insert(MII, NewStore);
-            VRM.addSpillSlotUse(SS, NewStore);
+            MBB->insert(MII, NewStore);
+            VRM->addSpillSlotUse(SS, NewStore);
            InvalidateKills(MI, TRI, RegKills, KillOps);
-            VRM.RemoveMachineInstrFromMaps(&MI);
-            MBB.erase(&MI);
+            VRM->RemoveMachineInstrFromMaps(&MI);
+            MBB->erase(&MI);
            Erased = true;
-            --NextMII;
+            --NextMII;  // backtrack to the unfolded instruction.
@@ -2252,8 +2293,8 @@ private:
            // If we get here, the store is dead, nuke it now.
            DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
            InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
-            VRM.RemoveMachineInstrFromMaps(DeadStore);
-            MBB.erase(DeadStore);
+            VRM->RemoveMachineInstrFromMaps(DeadStore);
+            MBB->erase(DeadStore);
            if (!NewStore)
              ++NumDSE;
          }
@@ -2262,7 +2303,7 @@ private:
          if (NewStore) {
            // Treat this store as a spill merged into a copy. That makes the
            // stack slot value available.
-            VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
+            VRM->virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
            goto ProcessNextInst;
          }
        }
@@ -2283,8 +2324,8 @@ private:
        assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
               "Src hasn't been allocated yet?");
 
-        if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
-                                Spills, RegKills, KillOps, TRI, VRM)) {
+        if (CommuteToFoldReload(MII, VirtReg, SrcReg, StackSlot,
+                                Spills, RegKills, KillOps, TRI)) {
          NextMII = llvm::next(MII);
          BackTracked = true;
          goto ProcessNextInst;
@@ -2330,10 +2371,10 @@ private:
                  TRI->isSubRegister(KillRegs[0], Dst) ||
                  TRI->isSuperRegister(KillRegs[0], Dst));
          // Last def is now dead.
-          TransferDeadness(&MBB, Dist, Src, RegKills, KillOps, VRM);
+          TransferDeadness(Dist, Src, RegKills, KillOps);
        }
-        VRM.RemoveMachineInstrFromMaps(&MI);
-        MBB.erase(&MI);
+        VRM->RemoveMachineInstrFromMaps(&MI);
+        MBB->erase(&MI);
        Erased = true;
        Spills.disallowClobberPhysReg(VirtReg);
        goto ProcessNextInst;
@@ -2360,13 +2401,13 @@ private:
      }
 
      unsigned SubIdx = MO.getSubReg();
-      bool DoReMat = VRM.isReMaterialized(VirtReg);
+      bool DoReMat = VRM->isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);
 
      // The only vregs left are stack slot definitions.
-      int StackSlot = VRM.getStackSlot(VirtReg);
-      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
+      int StackSlot = VRM->getStackSlot(VirtReg);
+      const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
 
      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
@@ -2381,17 +2422,17 @@ private:
          PhysReg = SuperReg;
        }
      } else {
-        PhysReg = VRM.getPhys(VirtReg);
+        PhysReg = VRM->getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
-                      Spills, MaybeDeadStores, RegKills, KillOps, VRM);
+                      Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
        }
      }
 
      assert(PhysReg && "VR not assigned a physical register?");
-      RegInfo->setPhysRegUsed(PhysReg);
+      MRI->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);
@@ -2399,8 +2440,8 @@ private:
 
      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
-        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
-                            LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
+        SpillRegToStackSlot(MII, -1, PhysReg, StackSlot, RC, true,
+                            LastStore, Spills, ReMatDefs, RegKills, KillOps);
        NextMII = llvm::next(MII);
 
        // Check to see if this is a noop copy. If so, eliminate the
@@ -2411,8 +2452,8 @@ private:
          ++NumDCE;
          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
          InvalidateKills(MI, TRI, RegKills, KillOps);
-          VRM.RemoveMachineInstrFromMaps(&MI);
-          MBB.erase(&MI);
+          VRM->RemoveMachineInstrFromMaps(&MI);
+          MBB->erase(&MI);
          Erased = true;
          UpdateKills(*LastStore, TRI, RegKills, KillOps);
          goto ProcessNextInst;
@@ -2424,8 +2465,8 @@ private:
    // Delete dead instructions without side effects.
    if (!Erased && !BackTracked && isSafeToDelete(MI)) {
      InvalidateKills(MI, TRI, RegKills, KillOps);
-      VRM.RemoveMachineInstrFromMaps(&MI);
-      MBB.erase(&MI);
+      VRM->RemoveMachineInstrFromMaps(&MI);
+      MBB->erase(&MI);
      Erased = true;
    }
    if (!Erased)
@@ -2439,10 +2480,6 @@ private:
 }
 
-};
-
 }
 
 llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
   switch (RewriterOpt) {
  default: llvm_unreachable("Unreachable!");
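An aside on the RefSorter comparator that appears near the top of the diff: it is a plain strict-weak-ordering functor over (instruction, distance) pairs, which TransferDeadness-style code can sort to find the last def or use of a register. A minimal sketch of that use (illustrative only; std::vector stands in for LLVM's SmallVector and a stub type for MachineInstr):

#include <algorithm>
#include <utility>
#include <vector>

struct MachineInstrStub {};  // stand-in for llvm::MachineInstr

// Same shape as the RefSorter in the diff: order references by their
// recorded distance (the int of the pair), i.e. by program order.
struct RefSorter {
  bool operator()(const std::pair<MachineInstrStub*, int> &A,
                  const std::pair<MachineInstrStub*, int> &B) const {
    return A.second < B.second;
  }
};

int main() {
  std::vector<std::pair<MachineInstrStub*, int> > Refs;
  MachineInstrStub I1, I2;
  Refs.push_back(std::make_pair(&I2, 7));  // later use
  Refs.push_back(std::make_pair(&I1, 3));  // earlier def
  std::sort(Refs.begin(), Refs.end(), RefSorter());
  // Refs.back() is now the reference with the greatest distance: the
  // "last def or use" that TransferDeadness marks as dead / kill.
  return Refs.back().second == 7 ? 0 : 1;
}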