Change the heuristics used in the coalescer, the register allocator, and
LiveIntervals itself to use an instruction count approximation that is
not affected by inserting empty indices.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@53937 91177308-0d34-0410-b5e6-96231b3b80d8
Owen Anderson 2008-07-22 22:46:49 +00:00
parent 38bcec13e8
commit a1566f2e12
5 changed files with 29 additions and 14 deletions
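
The heuristic works in two steps: an interval's length in numbering slots is first scaled to a per-mille share of all slots in the function (getScaledIntervalSize), and that share is then applied to the real instruction count (getApproximateInstructionCount). Because renumbering inflates both the interval's slot count and the function's total, the effect of empty indices largely cancels in the ratio. A minimal standalone sketch of the arithmetic, with hypothetical free functions standing in for the LiveIntervals members:

#include <cstdint>

// intervalSlots corresponds to LiveInterval::getSize(); totalSlots to the
// function's total number of numbering slots (i2miMap_.size() scaled by
// InstrSlots::NUM in the real code).
double scaledIntervalSize(double intervalSlots, double totalSlots) {
  // Per-mille share of the function; stable across renumberings that
  // insert empty index slots.
  return 1000.0 * intervalSlots / totalSlots;
}

unsigned approximateInstructionCount(double scaledSize, uint64_t functionSize) {
  // Convert the per-mille share back into a count of real instructions,
  // using the FunctionSize tally kept by computeNumbering().
  double intervalPercentage = scaledSize / 1000.0;
  return static_cast<unsigned>(intervalPercentage * functionSize);
}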

include/llvm/CodeGen/LiveIntervalAnalysis.h

@@ -75,6 +75,9 @@ namespace llvm {
     /// and MBB id.
     std::vector<IdxMBBPair> Idx2MBBMap;
+
+    /// FunctionSize - The number of instructions present in the function
+    uint64_t FunctionSize;
 
     typedef std::map<MachineInstr*, unsigned> Mi2IndexMap;
     Mi2IndexMap mi2iMap_;
@@ -169,11 +172,18 @@ namespace llvm {
       return MBB2IdxMap[MBBNo].second;
     }
 
-    /// getIntervalSize - get the size of an interval in "units,"
+    /// getScaledIntervalSize - get the size of an interval in "units,"
     /// where every function is composed of one thousand units.  This
     /// measure scales properly with empty index slots in the function.
-    unsigned getScaledIntervalSize(LiveInterval& I) const {
-      return (1000 / InstrSlots::NUM * I.getSize()) / i2miMap_.size();
+    double getScaledIntervalSize(LiveInterval& I) {
+      return (1000.0 / InstrSlots::NUM * I.getSize()) / i2miMap_.size();
     }
+
+    /// getApproximateInstructionCount - computes an estimate of the number
+    /// of instructions in a given LiveInterval.
+    unsigned getApproximateInstructionCount(LiveInterval& I) {
+      double IntervalPercentage = getScaledIntervalSize(I) / 1000.0;
+      return IntervalPercentage * FunctionSize;
+    }
 
     /// getMBBFromIndex - given an index in any instruction of an
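
To put hypothetical numbers through the sketch above: in a 200-instruction function occupying 800 numbering slots, an interval covering 40 slots scales to 50.0 units, and the count estimate recovers (50 / 1000) * 200 = 10 instructions. Returning double also keeps the sub-unit precision that the old unsigned version truncated away.

#include <cstdint>  // plus the sketch functions shown earlier

int main() {
  // Hypothetical example: a 200-instruction function (800 numbering slots)
  // and an interval covering 40 of those slots.
  double units = scaledIntervalSize(40.0, 800.0);            // 50.0 units
  unsigned count = approximateInstructionCount(units, 200);  // (50/1000)*200 = 10
  return count == 10 ? 0 : 1;
}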

lib/CodeGen/LiveIntervalAnalysis.cpp

@@ -87,6 +87,8 @@ void LiveIntervals::computeNumbering() {
   mi2iMap_.clear();
   i2miMap_.clear();
+  FunctionSize = 0;
 
   // Number MachineInstrs and MachineBasicBlocks.
   // Initialize MBB indexes to a sentinel.
   MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
@@ -102,6 +104,8 @@ void LiveIntervals::computeNumbering() {
       assert(inserted && "multiple MachineInstr -> index mappings");
       i2miMap_.push_back(I);
       MIIndex += InstrSlots::NUM;
+      FunctionSize++;
     }
 
     if (StartIdx == MIIndex) {
@@ -1789,7 +1793,7 @@ addIntervalsForSpills(const LiveInterval &li,
   for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
     LiveInterval *LI = NewLIs[i];
     if (!LI->empty()) {
-      LI->weight /= LI->getSize();
+      LI->weight /= getApproximateInstructionCount(*LI);
       if (!AddedKill.count(LI)) {
         LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
         unsigned LastUseIdx = getBaseIndex(LR->end);
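
This spill-weight change (and the matching hunk at line 2196 of SimpleRegisterCoalescing.cpp below) divides accumulated weight by the instruction-count estimate rather than the raw slot count. As the coalescer's comment puts it, this encourages spilling large intervals with few uses. A hypothetical illustration of the normalization:

// Two intervals with the same accumulated use weight (12.0f): after dividing
// by the approximate instruction count, the long, sparsely used interval
// gets the lower weight and becomes the preferred spill candidate.
float longSparse = 12.0f / 60;   // 0.2 -> spilled first
float shortDense = 12.0f / 4;    // 3.0 -> kept in a register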

lib/CodeGen/RegAllocLinearScan.cpp

@@ -851,7 +851,8 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
   if (minWeight == HUGE_VALF) {
     // All registers must have inf weight. Just grab one!
     minReg = BestPhysReg ? BestPhysReg : *RC->allocation_order_begin(*mf_);
-    if (cur->weight == HUGE_VALF || cur->getSize() == 1)
+    if (cur->weight == HUGE_VALF ||
+        li_->getApproximateInstructionCount(*cur) == 1)
       // Spill a physical register around defs and uses.
       li_->spillPhysRegAroundRegDefsUses(*cur, minReg, *vrm_);
   }

lib/CodeGen/SimpleRegisterCoalescing.cpp

@@ -851,8 +851,8 @@ SimpleRegisterCoalescing::isProfitableToCoalesceToSubRC(unsigned SrcReg,
   // Then make sure the intervals are *short*.
   LiveInterval &SrcInt = li_->getInterval(SrcReg);
   LiveInterval &DstInt = li_->getInterval(DstReg);
-  unsigned SrcSize = SrcInt.getSize() / InstrSlots::NUM;
-  unsigned DstSize = DstInt.getSize() / InstrSlots::NUM;
+  unsigned SrcSize = li_->getApproximateInstructionCount(SrcInt);
+  unsigned DstSize = li_->getApproximateInstructionCount(DstInt);
   const TargetRegisterClass *RC = mri_->getRegClass(DstReg);
   unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
   return (SrcSize + DstSize) <= Threshold;
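
Restated in isolation (illustrative names, not the actual interface), the profitability test now asks whether the two intervals' combined instruction-count estimate fits within twice the register class's allocatable registers:

// Coalescing into a sub-register class constrains the allocator, so it is
// only considered profitable when both intervals are short relative to the
// number of registers available in the target class.
bool profitableToCoalesceToSubRC(unsigned srcSize, unsigned dstSize,
                                 unsigned allocatableRegs) {
  return srcSize + dstSize <= allocatableRegs * 2;
}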
@@ -1011,10 +1011,10 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
     if (SubIdx) {
       unsigned LargeReg = isExtSubReg ? SrcReg : DstReg;
       unsigned SmallReg = isExtSubReg ? DstReg : SrcReg;
-      unsigned LargeRegSize =
-        li_->getInterval(LargeReg).getSize() / InstrSlots::NUM;
-      unsigned SmallRegSize =
-        li_->getInterval(SmallReg).getSize() / InstrSlots::NUM;
+      unsigned LargeRegSize =
+        li_->getApproximateInstructionCount(li_->getInterval(LargeReg));
+      unsigned SmallRegSize =
+        li_->getApproximateInstructionCount(li_->getInterval(SmallReg));
       const TargetRegisterClass *RC = mri_->getRegClass(SmallReg);
       unsigned Threshold = allocatableRCRegs_[RC].count();
       // Be conservative. If both sides are virtual registers, do not coalesce
@@ -1081,7 +1081,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
       // If the virtual register live interval is long but it has low use density,
       // do not join them; instead mark the physical register as its allocation
       // preference.
-      unsigned Length = JoinVInt.getSize() / InstrSlots::NUM;
+      unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
       if (Length > Threshold &&
           (((float)std::distance(mri_->use_begin(JoinVReg),
                                  mri_->use_end()) / Length) < (1.0 / Threshold))) {
@@ -2196,7 +2196,7 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
         // Divide the weight of the interval by its size. This encourages
         // spilling of intervals that are large and have few uses, and
         // discourages spilling of small intervals with many uses.
-        LI.weight /= LI.getSize();
+        LI.weight /= li_->getApproximateInstructionCount(LI);
       }
     }

lib/CodeGen/SimpleRegisterCoalescing.h

@@ -126,7 +126,7 @@ namespace llvm {
     unsigned getRepIntervalSize(unsigned Reg) {
       if (!li_->hasInterval(Reg))
         return 0;
-      return li_->getInterval(Reg).getSize();
+      return li_->getApproximateInstructionCount(li_->getInterval(Reg));
     }
 
     /// print - Implement the dump method.