Build the Hopfield network incrementally when splitting global live ranges.

It is common for large live ranges to have few basic blocks with register uses
and many live-through blocks without any uses. This approach grows the Hopfield
network incrementally around the use blocks, completely avoiding checking
interference for some through blocks.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129188 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Jakob Stoklund Olesen
2011-04-09 02:59:09 +00:00
parent 9d29cbad32
commit f4afdfc501
7 changed files with 183 additions and 84 deletions

View File

@ -16,6 +16,7 @@
#ifndef LLVM_CODEGEN_EDGEBUNDLES_H #ifndef LLVM_CODEGEN_EDGEBUNDLES_H
#define LLVM_CODEGEN_EDGEBUNDLES_H #define LLVM_CODEGEN_EDGEBUNDLES_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntEqClasses.h" #include "llvm/ADT/IntEqClasses.h"
#include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineFunctionPass.h"
@ -29,6 +30,9 @@ class EdgeBundles : public MachineFunctionPass {
/// 2*BB->getNumber()+1 -> Outgoing bundle. /// 2*BB->getNumber()+1 -> Outgoing bundle.
IntEqClasses EC; IntEqClasses EC;
/// Blocks - Map each bundle to a list of basic block numbers.
SmallVector<SmallVector<unsigned, 8>, 4> Blocks;
public: public:
static char ID; static char ID;
EdgeBundles() : MachineFunctionPass(ID) {} EdgeBundles() : MachineFunctionPass(ID) {}
@ -40,6 +44,9 @@ public:
/// getNumBundles - Return the total number of bundles in the CFG. /// getNumBundles - Return the total number of bundles in the CFG.
unsigned getNumBundles() const { return EC.getNumClasses(); } unsigned getNumBundles() const { return EC.getNumClasses(); }
/// getBlocks - Return an array of blocks that are connected to Bundle.
ArrayRef<unsigned> getBlocks(unsigned Bundle) { return Blocks[Bundle]; }
/// getMachineFunction - Return the last machine function computed. /// getMachineFunction - Return the last machine function computed.
const MachineFunction *getMachineFunction() const { return MF; } const MachineFunction *getMachineFunction() const { return MF; }

View File

@ -53,6 +53,19 @@ bool EdgeBundles::runOnMachineFunction(MachineFunction &mf) {
EC.compress(); EC.compress();
if (ViewEdgeBundles) if (ViewEdgeBundles)
view(); view();
// Compute the reverse mapping.
Blocks.clear();
Blocks.resize(getNumBundles());
for (unsigned i = 0, e = MF->getNumBlockIDs(); i != e; ++i) {
unsigned b0 = getBundle(i, 0);
unsigned b1 = getBundle(i, 1);
Blocks[b0].push_back(i);
if (b1 != b0)
Blocks[b1].push_back(i);
}
return false; return false;
} }
@ -82,5 +95,3 @@ raw_ostream &llvm::WriteGraph(raw_ostream &O, const EdgeBundles &G,
O << "}\n"; O << "}\n";
return O; return O;
} }

View File

@ -22,6 +22,7 @@
#include "SpillPlacement.h" #include "SpillPlacement.h"
#include "SplitKit.h" #include "SplitKit.h"
#include "VirtRegMap.h" #include "VirtRegMap.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Statistic.h" #include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h" #include "llvm/Function.h"
@ -126,9 +127,8 @@ class RAGreedy : public MachineFunctionPass,
/// All basic blocks where the current register has uses. /// All basic blocks where the current register has uses.
SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints; SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
/// All basic blocks where the current register is live-through and /// Live-through blocks that have already been added to SpillPlacer.
/// interference free. SparseBitVector<> ActiveThroughBlocks;
SmallVector<unsigned, 8> TransparentBlocks;
/// Global live range splitting candidate info. /// Global live range splitting candidate info.
struct GlobalSplitCandidate { struct GlobalSplitCandidate {
@ -173,7 +173,9 @@ private:
void LRE_WillShrinkVirtReg(unsigned); void LRE_WillShrinkVirtReg(unsigned);
void LRE_DidCloneVirtReg(unsigned, unsigned); void LRE_DidCloneVirtReg(unsigned, unsigned);
bool addSplitConstraints(unsigned, float&); bool addSplitConstraints(InterferenceCache::Cursor, float&);
void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
void growRegion(InterferenceCache::Cursor);
float calcGlobalSplitCost(unsigned, const BitVector&); float calcGlobalSplitCost(unsigned, const BitVector&);
void splitAroundRegion(LiveInterval&, unsigned, const BitVector&, void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
SmallVectorImpl<LiveInterval*>&); SmallVectorImpl<LiveInterval*>&);
@ -417,9 +419,9 @@ unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
/// interference pattern in Physreg and its aliases. Add the constraints to /// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming /// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met. /// that all preferences in SplitConstraints are met.
/// If it is evident that no bundles will be live, abort early and return false. /// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(unsigned PhysReg, float &Cost) { bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
InterferenceCache::Cursor Intf(IntfCache, PhysReg); float &Cost) {
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
// Reset interference dependent info. // Reset interference dependent info.
@ -464,35 +466,41 @@ bool RAGreedy::addSplitConstraints(unsigned PhysReg, float &Cost) {
if (Ins) if (Ins)
StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number); StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
} }
Cost = StaticCost;
// Add constraints for use-blocks. Note that these are the only constraints // Add constraints for use-blocks. Note that these are the only constraints
// that may add a positive bias, it is downhill from here. // that may add a positive bias, it is downhill from here.
SpillPlacer->addConstraints(SplitConstraints); SpillPlacer->addConstraints(SplitConstraints);
if (SpillPlacer->getPositiveNodes() == 0) return SpillPlacer->scanActiveBundles();
return false; }
Cost = StaticCost;
// Now handle the live-through blocks without uses. These can only add /// addThroughConstraints - Add constraints and links to SpillPlacer from the
// negative bias, so we can abort whenever there are no more positive nodes. /// live-through blocks in Blocks.
// Compute constraints for a group of 8 blocks at a time. void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
ArrayRef<unsigned> Blocks) {
const unsigned GroupSize = 8; const unsigned GroupSize = 8;
SpillPlacement::BlockConstraint BCS[GroupSize]; SpillPlacement::BlockConstraint BCS[GroupSize];
unsigned B = 0; unsigned TBS[GroupSize];
TransparentBlocks.clear(); unsigned B = 0, T = 0;
ArrayRef<unsigned> ThroughBlocks = SA->getThroughBlocks(); for (unsigned i = 0; i != Blocks.size(); ++i) {
for (unsigned i = 0; i != ThroughBlocks.size(); ++i) { unsigned Number = Blocks[i];
unsigned Number = ThroughBlocks[i];
assert(B < GroupSize && "Array overflow");
BCS[B].Number = Number;
Intf.moveToBlock(Number); Intf.moveToBlock(Number);
if (!Intf.hasInterference()) { if (!Intf.hasInterference()) {
TransparentBlocks.push_back(Number); assert(T < GroupSize && "Array overflow");
TBS[T] = Number;
if (++T == GroupSize) {
SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
T = 0;
}
continue; continue;
} }
assert(B < GroupSize && "Array overflow");
BCS[B].Number = Number;
// Interference for the live-in value. // Interference for the live-in value.
if (Intf.first() <= Indexes->getMBBStartIdx(Number)) if (Intf.first() <= Indexes->getMBBStartIdx(Number))
BCS[B].Entry = SpillPlacement::MustSpill; BCS[B].Entry = SpillPlacement::MustSpill;
@ -509,22 +517,55 @@ bool RAGreedy::addSplitConstraints(unsigned PhysReg, float &Cost) {
ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B); ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
SpillPlacer->addConstraints(Array); SpillPlacer->addConstraints(Array);
B = 0; B = 0;
// Abort early when all hope is lost.
if (SpillPlacer->getPositiveNodes() == 0)
return false;
} }
} }
ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B); ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
SpillPlacer->addConstraints(Array); SpillPlacer->addConstraints(Array);
if (SpillPlacer->getPositiveNodes() == 0) SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
return false;
// There is still some positive bias. Add all the links.
SpillPlacer->addLinks(TransparentBlocks);
return true;
} }
/// growRegion - Incrementally grow the Hopfield network around the blocks with
/// register uses. Each round asks SpillPlacer for the bundles that recently
/// became positive (prefer-register), adds the live-through blocks touching
/// those bundles as new constraints/links, and iterates the network again.
/// Terminates when no new bundles turn positive, so through blocks far from
/// any use may never be examined at all.
void RAGreedy::growRegion(InterferenceCache::Cursor Intf) {
  // Keep track of through blocks that have already been added to SpillPlacer.
  SparseBitVector<> Added;
  // Batch of newly discovered through blocks for the next
  // addThroughConstraints() call; reused across rounds.
  SmallVector<unsigned, 16> ThroughBlocks;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif
  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    if (NewBundles.empty())
      break;
    // Find new through blocks in the periphery of NewBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        // Skip use blocks, and through blocks seen in an earlier round;
        // test_and_set returns false when the bit was already set.
        if (!SA->isThroughBlock(Block) || !Added.test_and_set(Block))
          continue;
        // This is a new through block. Add it to SpillPlacer later.
        ThroughBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (!ThroughBlocks.empty()) {
      addThroughConstraints(Intf, ThroughBlocks);
      ThroughBlocks.clear();
    }
    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  // Remember the relevant set of through blocks for splitAroundRegion().
  ActiveThroughBlocks |= Added;
  DEBUG(dbgs() << ", v=" << Visited);
}
/// calcGlobalSplitCost - Return the global split cost of following the split /// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the /// pattern in LiveBundles. This cost should be added to the local cost of the
@ -550,10 +591,9 @@ float RAGreedy::calcGlobalSplitCost(unsigned PhysReg,
} }
InterferenceCache::Cursor Intf(IntfCache, PhysReg); InterferenceCache::Cursor Intf(IntfCache, PhysReg);
ArrayRef<unsigned> ThroughBlocks = SA->getThroughBlocks(); for (SparseBitVector<>::iterator I = ActiveThroughBlocks.begin(),
SplitConstraints.resize(UseBlocks.size() + ThroughBlocks.size()); E = ActiveThroughBlocks.end(); I != E; ++I) {
for (unsigned i = 0; i != ThroughBlocks.size(); ++i) { unsigned Number = *I;
unsigned Number = ThroughBlocks[i];
bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)]; bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)]; bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
if (!RegIn && !RegOut) if (!RegIn && !RegOut)
@ -766,9 +806,9 @@ void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
} }
// Handle live-through blocks. // Handle live-through blocks.
ArrayRef<unsigned> ThroughBlocks = SA->getThroughBlocks(); for (SparseBitVector<>::iterator I = ActiveThroughBlocks.begin(),
for (unsigned i = 0; i != ThroughBlocks.size(); ++i) { E = ActiveThroughBlocks.end(); I != E; ++I) {
unsigned Number = ThroughBlocks[i]; unsigned Number = *I;
bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)]; bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)]; bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
DEBUG(dbgs() << "Live through BB#" << Number << '\n'); DEBUG(dbgs() << "Live through BB#" << Number << '\n');
@ -804,6 +844,7 @@ unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
BitVector LiveBundles, BestBundles; BitVector LiveBundles, BestBundles;
float BestCost = 0; float BestCost = 0;
unsigned BestReg = 0; unsigned BestReg = 0;
ActiveThroughBlocks.clear();
Order.rewind(); Order.rewind();
for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) { for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
@ -813,16 +854,17 @@ unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
SpillPlacer->prepare(LiveBundles); SpillPlacer->prepare(LiveBundles);
float Cost; float Cost;
if (!addSplitConstraints(PhysReg, Cost)) { InterferenceCache::Cursor Intf(IntfCache, PhysReg);
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bias\n"); if (!addSplitConstraints(Intf, Cost)) {
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
continue; continue;
} }
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tbiased = " DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
<< SpillPlacer->getPositiveNodes() << ", static = " << Cost);
if (BestReg && Cost >= BestCost) { if (BestReg && Cost >= BestCost) {
DEBUG(dbgs() << " worse than " << PrintReg(BestReg, TRI) << '\n'); DEBUG(dbgs() << " worse than " << PrintReg(BestReg, TRI) << '\n');
continue; continue;
} }
growRegion(Intf);
SpillPlacer->finish(); SpillPlacer->finish();

View File

@ -135,13 +135,10 @@ struct SpillPlacement::Node {
/// addBias - Bias this node from an ingoing[0] or outgoing[1] link. /// addBias - Bias this node from an ingoing[0] or outgoing[1] link.
/// Return the change to the total number of positive biases. /// Return the change to the total number of positive biases.
int addBias(float w, bool out) { void addBias(float w, bool out) {
// Normalize w relative to all connected blocks from that direction. // Normalize w relative to all connected blocks from that direction.
w *= Scale[out]; w *= Scale[out];
int Before = Bias > 0;
Bias += w; Bias += w;
int After = Bias > 0;
return After - Before;
} }
/// update - Recompute Value from Bias and Links. Return true when node /// update - Recompute Value from Bias and Links. Return true when node
@ -230,14 +227,14 @@ void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
if (I->Entry != DontCare) { if (I->Entry != DontCare) {
unsigned ib = bundles->getBundle(I->Number, 0); unsigned ib = bundles->getBundle(I->Number, 0);
activate(ib); activate(ib);
PositiveNodes += nodes[ib].addBias(Freq * Bias[I->Entry], 1); nodes[ib].addBias(Freq * Bias[I->Entry], 1);
} }
// Live-out from block? // Live-out from block?
if (I->Exit != DontCare) { if (I->Exit != DontCare) {
unsigned ob = bundles->getBundle(I->Number, 1); unsigned ob = bundles->getBundle(I->Number, 1);
activate(ob); activate(ob);
PositiveNodes += nodes[ob].addBias(Freq * Bias[I->Exit], 0); nodes[ob].addBias(Freq * Bias[I->Exit], 0);
} }
} }
} }
@ -254,16 +251,42 @@ void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
continue; continue;
activate(ib); activate(ib);
activate(ob); activate(ob);
if (nodes[ib].Links.empty() && !nodes[ib].mustSpill())
Linked.push_back(ib);
if (nodes[ob].Links.empty() && !nodes[ob].mustSpill())
Linked.push_back(ob);
float Freq = getBlockFrequency(Number); float Freq = getBlockFrequency(Number);
nodes[ib].addLink(ob, Freq, 1); nodes[ib].addLink(ob, Freq, 1);
nodes[ob].addLink(ib, Freq, 0); nodes[ob].addLink(ib, Freq, 0);
} }
} }
bool SpillPlacement::scanActiveBundles() {
Linked.clear();
RecentPositive.clear();
for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
nodes[n].update(nodes);
// A node that must spill, or a node without any links is not going to
// change its value ever again, so exclude it from iterations.
if (nodes[n].mustSpill())
continue;
if (!nodes[n].Links.empty())
Linked.push_back(n);
if (nodes[n].preferReg())
RecentPositive.push_back(n);
}
return !RecentPositive.empty();
}
/// iterate - Repeatedly update the Hopfield nodes until stability or the /// iterate - Repeatedly update the Hopfield nodes until stability or the
/// maximum number of iterations is reached. /// maximum number of iterations is reached.
/// @param Linked - Numbers of linked nodes that need updating. /// @param Linked - Numbers of linked nodes that need updating.
void SpillPlacement::iterate(const SmallVectorImpl<unsigned> &Linked) { void SpillPlacement::iterate() {
// First update the recently positive nodes. They have likely received new
// negative bias that will turn them off.
while (!RecentPositive.empty())
nodes[RecentPositive.pop_back_val()].update(nodes);
if (Linked.empty()) if (Linked.empty())
return; return;
@ -279,10 +302,13 @@ void SpillPlacement::iterate(const SmallVectorImpl<unsigned> &Linked) {
for (SmallVectorImpl<unsigned>::const_reverse_iterator I = for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
llvm::next(Linked.rbegin()), E = Linked.rend(); I != E; ++I) { llvm::next(Linked.rbegin()), E = Linked.rend(); I != E; ++I) {
unsigned n = *I; unsigned n = *I;
bool C = nodes[n].update(nodes); if (nodes[n].update(nodes)) {
Changed |= C; Changed = true;
if (nodes[n].preferReg())
RecentPositive.push_back(n);
}
} }
if (!Changed) if (!Changed || !RecentPositive.empty())
return; return;
// Scan forwards, skipping the first node which was just updated. // Scan forwards, skipping the first node which was just updated.
@ -290,38 +316,29 @@ void SpillPlacement::iterate(const SmallVectorImpl<unsigned> &Linked) {
for (SmallVectorImpl<unsigned>::const_iterator I = for (SmallVectorImpl<unsigned>::const_iterator I =
llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) { llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
unsigned n = *I; unsigned n = *I;
bool C = nodes[n].update(nodes); if (nodes[n].update(nodes)) {
Changed |= C; Changed = true;
if (nodes[n].preferReg())
RecentPositive.push_back(n);
}
} }
if (!Changed) if (!Changed || !RecentPositive.empty())
return; return;
} }
} }
void SpillPlacement::prepare(BitVector &RegBundles) { void SpillPlacement::prepare(BitVector &RegBundles) {
Linked.clear();
RecentPositive.clear();
// Reuse RegBundles as our ActiveNodes vector. // Reuse RegBundles as our ActiveNodes vector.
ActiveNodes = &RegBundles; ActiveNodes = &RegBundles;
ActiveNodes->clear(); ActiveNodes->clear();
ActiveNodes->resize(bundles->getNumBundles()); ActiveNodes->resize(bundles->getNumBundles());
PositiveNodes = 0;
} }
bool bool
SpillPlacement::finish() { SpillPlacement::finish() {
assert(ActiveNodes && "Call prepare() first"); assert(ActiveNodes && "Call prepare() first");
// Update all active nodes, and find the ones that are actually linked to
// something so their value may change when iterating.
SmallVector<unsigned, 8> Linked;
for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
nodes[n].update(nodes);
// A node that must spill, or a node without any links is not going to
// change its value ever again, so exclude it from iterations.
if (!nodes[n].Links.empty() && !nodes[n].mustSpill())
Linked.push_back(n);
}
// Iterate the network to convergence.
iterate(Linked);
// Write preferences back to ActiveNodes. // Write preferences back to ActiveNodes.
bool Perfect = true; bool Perfect = true;

View File

@ -49,8 +49,12 @@ class SpillPlacement : public MachineFunctionPass {
// caller. // caller.
BitVector *ActiveNodes; BitVector *ActiveNodes;
// The number of active nodes with a positive bias. // Nodes with active links. Populated by scanActiveBundles.
unsigned PositiveNodes; SmallVector<unsigned, 8> Linked;
// Nodes that went positive during the last call to scanActiveBundles or
// iterate.
SmallVector<unsigned, 8> RecentPositive;
// Block frequencies are computed once. Indexed by block number. // Block frequencies are computed once. Indexed by block number.
SmallVector<float, 4> BlockFrequency; SmallVector<float, 4> BlockFrequency;
@ -95,9 +99,20 @@ public:
/// addLinks - Add transparent blocks with the given numbers. /// addLinks - Add transparent blocks with the given numbers.
void addLinks(ArrayRef<unsigned> Links); void addLinks(ArrayRef<unsigned> Links);
/// getPositiveNodes - Return the total number of graph nodes with a positive /// scanActiveBundles - Perform an initial scan of all bundles activated by
/// bias after adding constraints. /// addConstraints and addLinks, updating their state. Add all the bundles
unsigned getPositiveNodes() const { return PositiveNodes; } /// that now prefer a register to RecentPositive.
/// Prepare internal data structures for iterate.
/// Return true if there are any positive nodes.
bool scanActiveBundles();
/// iterate - Update the network iteratively until convergence, or new bundles
/// are found.
void iterate();
/// getRecentPositive - Return an array of bundles that became positive during
/// the previous call to scanActiveBundles or iterate.
ArrayRef<unsigned> getRecentPositive() { return RecentPositive; }
/// finish - Compute the optimal spill code placement given the /// finish - Compute the optimal spill code placement given the
/// constraints. No MustSpill constraints will be violated, and the smallest /// constraints. No MustSpill constraints will be violated, and the smallest
@ -120,7 +135,6 @@ private:
virtual void releaseMemory(); virtual void releaseMemory();
void activate(unsigned); void activate(unsigned);
void iterate(const SmallVectorImpl<unsigned>&);
}; };
} // end namespace llvm } // end namespace llvm

View File

@ -132,12 +132,14 @@ void SplitAnalysis::analyzeUses() {
DEBUG(dbgs() << "Analyze counted " DEBUG(dbgs() << "Analyze counted "
<< UseSlots.size() << " instrs in " << UseSlots.size() << " instrs in "
<< UseBlocks.size() << " blocks, through " << UseBlocks.size() << " blocks, through "
<< ThroughBlocks.size() << " blocks.\n"); << NumThroughBlocks << " blocks.\n");
} }
/// calcLiveBlockInfo - Fill the LiveBlocks array with information about blocks /// calcLiveBlockInfo - Fill the LiveBlocks array with information about blocks
/// where CurLI is live. /// where CurLI is live.
bool SplitAnalysis::calcLiveBlockInfo() { bool SplitAnalysis::calcLiveBlockInfo() {
ThroughBlocks.resize(MF.getNumBlockIDs());
NumThroughBlocks = 0;
if (CurLI->empty()) if (CurLI->empty())
return true; return true;
@ -193,9 +195,10 @@ bool SplitAnalysis::calcLiveBlockInfo() {
BI.LiveThrough = !hasGap && BI.LiveIn && BI.LiveOut; BI.LiveThrough = !hasGap && BI.LiveIn && BI.LiveOut;
if (Uses) if (Uses)
UseBlocks.push_back(BI); UseBlocks.push_back(BI);
else else {
ThroughBlocks.push_back(BI.MBB->getNumber()); ++NumThroughBlocks;
ThroughBlocks.set(BI.MBB->getNumber());
}
// FIXME: This should never happen. The live range stops or starts without a // FIXME: This should never happen. The live range stops or starts without a
// corresponding use. An earlier pass did something wrong. // corresponding use. An earlier pass did something wrong.
if (!BI.LiveThrough && !Uses) if (!BI.LiveThrough && !Uses)

View File

@ -89,7 +89,10 @@ private:
SmallVector<BlockInfo, 8> UseBlocks; SmallVector<BlockInfo, 8> UseBlocks;
/// ThroughBlocks - Block numbers where CurLI is live through without uses. /// ThroughBlocks - Block numbers where CurLI is live through without uses.
SmallVector<unsigned, 8> ThroughBlocks; BitVector ThroughBlocks;
/// NumThroughBlocks - Number of live-through blocks.
unsigned NumThroughBlocks;
SlotIndex computeLastSplitPoint(unsigned Num); SlotIndex computeLastSplitPoint(unsigned Num);
@ -135,9 +138,11 @@ public:
/// where CurLI has uses. /// where CurLI has uses.
ArrayRef<BlockInfo> getUseBlocks() { return UseBlocks; } ArrayRef<BlockInfo> getUseBlocks() { return UseBlocks; }
/// getThroughBlocks - Return an array of block numbers where CurLI is live /// getNumThroughBlocks - Return the number of through blocks.
/// through without uses. unsigned getNumThroughBlocks() const { return NumThroughBlocks; }
ArrayRef<unsigned> getThroughBlocks() { return ThroughBlocks; }
/// isThroughBlock - Return true if CurLI is live through MBB without uses.
bool isThroughBlock(unsigned MBB) const { return ThroughBlocks.test(MBB); }
typedef SmallPtrSet<const MachineBasicBlock*, 16> BlockPtrSet; typedef SmallPtrSet<const MachineBasicBlock*, 16> BlockPtrSet;