//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass.  Given some numbering
// of each of the machine instructions (in this implementation depth-first
// order), an interval [i, j) is said to be a live interval for register v if
// there is no instruction with number j' > j such that v is live at j' and
// there is no instruction with number i' < i such that v is live at i'.  In
// this implementation intervals can have holes, i.e. an interval might look
// like [1,20), [50,65), [1000,1001).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include <cmath>

namespace llvm {

  class AliasAnalysis;
  class LiveVariables;
  class MachineLoopInfo;
  class TargetRegisterInfo;
  class MachineRegisterInfo;
  class TargetInstrInfo;
  class TargetRegisterClass;
  class VirtRegMap;

  typedef std::pair<unsigned, MachineBasicBlock*> IdxMBBPair;

  inline bool operator<(unsigned V, const IdxMBBPair &IM) {
    return V < IM.first;
  }

  inline bool operator<(const IdxMBBPair &IM, unsigned V) {
    return IM.first < V;
  }

  struct Idx2MBBCompare {
    bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
      return LHS.first < RHS.first;
    }
  };

  class LiveIntervals : public MachineFunctionPass {
    MachineFunction* mf_;
    MachineRegisterInfo* mri_;
    const TargetMachine* tm_;
    const TargetRegisterInfo* tri_;
    const TargetInstrInfo* tii_;
    AliasAnalysis *aa_;
    LiveVariables* lv_;

    /// Special pool allocator for VNInfo's (LiveInterval val#).
    ///
    BumpPtrAllocator VNInfoAllocator;

    /// MBB2IdxMap - The indexes of the first and last instructions in the
    /// specified basic block.
    std::vector<std::pair<unsigned, unsigned> > MBB2IdxMap;

    /// Idx2MBBMap - Sorted list of pairs of index of first instruction
    /// and MBB id.
    std::vector<IdxMBBPair> Idx2MBBMap;

    /// FunctionSize - The number of instructions present in the function.
    uint64_t FunctionSize;

    typedef DenseMap<const MachineInstr*, unsigned> Mi2IndexMap;
    Mi2IndexMap mi2iMap_;

    typedef std::vector<MachineInstr*> Index2MiMap;
    Index2MiMap i2miMap_;

    typedef DenseMap<unsigned, LiveInterval*> Reg2IntervalMap;
    Reg2IntervalMap r2iMap_;

    DenseMap<MachineBasicBlock*, unsigned> terminatorGaps;

    BitVector allocatableRegs_;

    std::vector<MachineInstr*> ClonedMIs;

    typedef LiveInterval::InstrSlots InstrSlots;

  public:
    static char ID; // Pass identification, replacement for typeid
    LiveIntervals() : MachineFunctionPass(&ID) {}

    static unsigned getBaseIndex(unsigned index) {
      return index - (index % InstrSlots::NUM);
    }
    static unsigned getBoundaryIndex(unsigned index) {
      return getBaseIndex(index + InstrSlots::NUM - 1);
    }
    static unsigned getLoadIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::LOAD;
    }
    static unsigned getUseIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::USE;
    }
    static unsigned getDefIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::DEF;
    }
    static unsigned getStoreIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::STORE;
    }
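
    // Illustrative note (an assumption about the InstrSlots layout defined in
    // LiveInterval.h, where each instruction owns InstrSlots::NUM consecutive
    // indices ordered LOAD < USE < DEF < STORE): with InstrSlots::NUM == 4,
    // the instruction whose base index is 8 owns indices 8..11, so for any i
    // in that range getBaseIndex(i) == 8, getUseIndex(i) == 8 + InstrSlots::USE,
    // and getDefIndex(i) == 8 + InstrSlots::DEF.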

    static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
      return (isDef + isUse) * powf(10.0F, (float)loopDepth);
    }
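
    // For example, an operand that is both a def and a use at loop depth 2
    // gets weight (1 + 1) * 10^2 = 200; each additional level of loop nesting
    // multiplies the weight by ten.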

    typedef Reg2IntervalMap::iterator iterator;
    typedef Reg2IntervalMap::const_iterator const_iterator;
    const_iterator begin() const { return r2iMap_.begin(); }
    const_iterator end() const { return r2iMap_.end(); }
    iterator begin() { return r2iMap_.begin(); }
    iterator end() { return r2iMap_.end(); }
    unsigned getNumIntervals() const { return (unsigned)r2iMap_.size(); }

    LiveInterval &getInterval(unsigned reg) {
      Reg2IntervalMap::iterator I = r2iMap_.find(reg);
      assert(I != r2iMap_.end() && "Interval does not exist for register");
      return *I->second;
    }

    const LiveInterval &getInterval(unsigned reg) const {
      Reg2IntervalMap::const_iterator I = r2iMap_.find(reg);
      assert(I != r2iMap_.end() && "Interval does not exist for register");
      return *I->second;
    }

    bool hasInterval(unsigned reg) const {
      return r2iMap_.count(reg);
    }
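
    // A minimal usage sketch (illustrative only; the enclosing client pass
    // and the register Reg are hypothetical): a pass that consumes this
    // analysis would request it in getAnalysisUsage and then query it, e.g.
    //
    //   AU.addRequired<LiveIntervals>();            // in getAnalysisUsage
    //   ...
    //   LiveIntervals &LIs = getAnalysis<LiveIntervals>();
    //   if (LIs.hasInterval(Reg))
    //     LiveInterval &LI = LIs.getInterval(Reg);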

    /// getMBBStartIdx - Return the base index of the first instruction in the
    /// specified MachineBasicBlock.
    unsigned getMBBStartIdx(MachineBasicBlock *MBB) const {
      return getMBBStartIdx(MBB->getNumber());
    }
    unsigned getMBBStartIdx(unsigned MBBNo) const {
      assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
      return MBB2IdxMap[MBBNo].first;
    }

    /// getMBBEndIdx - Return the store index of the last instruction in the
    /// specified MachineBasicBlock.
    unsigned getMBBEndIdx(MachineBasicBlock *MBB) const {
      return getMBBEndIdx(MBB->getNumber());
    }
    unsigned getMBBEndIdx(unsigned MBBNo) const {
      assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
      return MBB2IdxMap[MBBNo].second;
    }

    /// getScaledIntervalSize - get the size of an interval in "units,"
    /// where every function is composed of one thousand units.  This
    /// measure scales properly with empty index slots in the function.
    double getScaledIntervalSize(LiveInterval& I) {
      return (1000.0 / InstrSlots::NUM * I.getSize()) / i2miMap_.size();
    }

    /// getApproximateInstructionCount - computes an estimate of the number
    /// of instructions in a given LiveInterval.
    unsigned getApproximateInstructionCount(LiveInterval& I) {
      double IntervalPercentage = getScaledIntervalSize(I) / 1000.0;
      return (unsigned)(IntervalPercentage * FunctionSize);
    }
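
    // For example, an interval whose ranges cover a quarter of the function's
    // instruction slots scales to roughly 250 "units", and
    // getApproximateInstructionCount then estimates about FunctionSize / 4
    // instructions for it.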

    /// getMBBFromIndex - given an index in any instruction of an
    /// MBB return a pointer to the MBB
    MachineBasicBlock* getMBBFromIndex(unsigned index) const {
      std::vector<IdxMBBPair>::const_iterator I =
        std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), index);
      // Take the pair containing the index
      std::vector<IdxMBBPair>::const_iterator J =
        ((I != Idx2MBBMap.end() && I->first > index) ||
         (I == Idx2MBBMap.end() && Idx2MBBMap.size() > 0)) ? (I-1) : I;

      assert(J != Idx2MBBMap.end() && J->first <= index &&
             index <= getMBBEndIdx(J->second) &&
             "index does not correspond to an MBB");
      return J->second;
    }

    /// getInstructionIndex - returns the base index of instr
    unsigned getInstructionIndex(const MachineInstr* instr) const {
      Mi2IndexMap::const_iterator it = mi2iMap_.find(instr);
      assert(it != mi2iMap_.end() && "Invalid instruction!");
      return it->second;
    }

    /// getInstructionFromIndex - given an index in any slot of an
    /// instruction return a pointer to the instruction
    MachineInstr* getInstructionFromIndex(unsigned index) const {
      index /= InstrSlots::NUM; // convert index to vector index
      assert(index < i2miMap_.size() &&
             "index does not correspond to an instruction");
      return i2miMap_[index];
    }

    /// hasGapBeforeInstr - Return true if the previous instruction slot,
    /// i.e. Index - InstrSlots::NUM, is not occupied.
    bool hasGapBeforeInstr(unsigned Index) {
      Index = getBaseIndex(Index - InstrSlots::NUM);
      return getInstructionFromIndex(Index) == 0;
    }

    /// hasGapAfterInstr - Return true if the successive instruction slot,
    /// i.e. Index + InstrSlots::NUM, is not occupied.
    bool hasGapAfterInstr(unsigned Index) {
      Index = getBaseIndex(Index + InstrSlots::NUM);
      return getInstructionFromIndex(Index) == 0;
    }

    /// findGapBeforeInstr - Find an empty instruction slot before the
    /// specified index. If "Furthest" is true, find one that's furthest
    /// away from the index (but before any index that's occupied).
    unsigned findGapBeforeInstr(unsigned Index, bool Furthest = false) {
      Index = getBaseIndex(Index - InstrSlots::NUM);
      if (getInstructionFromIndex(Index))
        return 0;  // No gap!
      if (!Furthest)
        return Index;
      // Walk backwards while the preceding slot is also empty, so the
      // returned index is the furthest gap before an occupied slot.
      unsigned PrevIndex = getBaseIndex(Index - InstrSlots::NUM);
      while (getInstructionFromIndex(PrevIndex) == 0) {
        Index = PrevIndex;
        PrevIndex = getBaseIndex(Index - InstrSlots::NUM);
      }
      return Index;
    }
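
    // Sketch of the intended pairing (NewMI and Idx are hypothetical): a
    // client can probe for a free slot and then register a newly created
    // instruction at it, e.g.
    //
    //   if (unsigned Gap = findGapBeforeInstr(Idx))
    //     InsertMachineInstrInMaps(NewMI, Gap);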

    /// InsertMachineInstrInMaps - Insert the specified machine instruction
    /// into the instruction index map at the given index.
    void InsertMachineInstrInMaps(MachineInstr *MI, unsigned Index) {
      i2miMap_[Index / InstrSlots::NUM] = MI;
      Mi2IndexMap::iterator it = mi2iMap_.find(MI);
      assert(it == mi2iMap_.end() && "Already in map!");
      mi2iMap_[MI] = Index;
    }

    /// conflictsWithPhysRegDef - Returns true if the specified register
    /// is defined during the duration of the specified interval.
    bool conflictsWithPhysRegDef(const LiveInterval &li, VirtRegMap &vrm,
                                 unsigned reg);

    /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
    /// it can check use as well.
    bool conflictsWithPhysRegRef(LiveInterval &li, unsigned Reg,
                                 bool CheckUse,
                                 SmallPtrSet<MachineInstr*,32> &JoinedCopies);

    /// findLiveInMBBs - Given a live range, if the value of the range is live
    /// in any MBB, returns true and fills in the list of basic blocks in which
    /// the value is live.
    bool findLiveInMBBs(unsigned Start, unsigned End,
                        SmallVectorImpl<MachineBasicBlock*> &MBBs) const;

    /// findReachableMBBs - Return a list of MBBs that can be reached via any
    /// branch or fallthroughs. Return true if the list is not empty.
    bool findReachableMBBs(unsigned Start, unsigned End,
                           SmallVectorImpl<MachineBasicBlock*> &MBBs) const;

    // Interval creation

    LiveInterval &getOrCreateInterval(unsigned reg) {
      Reg2IntervalMap::iterator I = r2iMap_.find(reg);
      if (I == r2iMap_.end())
        I = r2iMap_.insert(std::make_pair(reg, createInterval(reg))).first;
      return *I->second;
    }

    /// dupInterval - Duplicate a live interval. The caller is responsible for
    /// managing the allocated memory.
    LiveInterval *dupInterval(LiveInterval *li);

    /// addLiveRangeToEndOfBlock - Given a register and an instruction,
    /// adds a live range from that instruction to the end of its MBB.
    LiveRange addLiveRangeToEndOfBlock(unsigned reg,
                                       MachineInstr* startInst);

    // Interval removal

    void removeInterval(unsigned Reg) {
      DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.find(Reg);
      delete I->second;
      r2iMap_.erase(I);
    }

    /// isNotInMIMap - returns true if the specified machine instr has been
    /// removed or was never entered in the map.
    bool isNotInMIMap(MachineInstr* instr) const {
      return !mi2iMap_.count(instr);
    }

    /// RemoveMachineInstrFromMaps - This marks the specified machine instr as
    /// deleted.
    void RemoveMachineInstrFromMaps(MachineInstr *MI) {
      // remove index -> MachineInstr and
      // MachineInstr -> index mappings
      Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI);
      if (mi2i != mi2iMap_.end()) {
        i2miMap_[mi2i->second/InstrSlots::NUM] = 0;
        mi2iMap_.erase(mi2i);
      }
    }

    /// ReplaceMachineInstrInMaps - Replace a machine instr with a new one in
    /// the maps used by the register allocator.
    void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
      Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI);
      if (mi2i == mi2iMap_.end())
        return;
      i2miMap_[mi2i->second/InstrSlots::NUM] = NewMI;
      unsigned Index = mi2i->second;
      mi2iMap_.erase(mi2i);
      mi2iMap_[NewMI] = Index;
    }

    BumpPtrAllocator& getVNInfoAllocator() { return VNInfoAllocator; }

    /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
    /// copy field and returns the source register that defines it.
    unsigned getVNInfoSourceReg(const VNInfo *VNI) const;

    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
    virtual void releaseMemory();

    /// runOnMachineFunction - pass entry point
    virtual bool runOnMachineFunction(MachineFunction&);

    /// print - Implement the dump method.
    virtual void print(raw_ostream &O, const Module* = 0) const;

    /// addIntervalsForSpills - Create new intervals for spilled defs / uses of
    /// the given interval. FIXME: It also returns the weight of the spill slot
    /// (if any is created) by reference. This is temporary.
    std::vector<LiveInterval*>
    addIntervalsForSpills(const LiveInterval& i,
                          SmallVectorImpl<LiveInterval*> &SpillIs,
                          const MachineLoopInfo *loopInfo, VirtRegMap& vrm);

    /// addIntervalsForSpillsFast - Quickly create new intervals for spilled
    /// defs / uses without remat or splitting.
    std::vector<LiveInterval*>
    addIntervalsForSpillsFast(const LiveInterval &li,
                              const MachineLoopInfo *loopInfo, VirtRegMap &vrm);

    /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
    /// around all defs and uses of the specified interval. Return true if it
    /// was able to cut its interval.
    bool spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                       unsigned PhysReg, VirtRegMap &vrm);

    /// isReMaterializable - Returns true if every definition of MI of every
    /// val# of the specified interval is re-materializable. Also returns true
    /// by reference if all of the defs are load instructions.
    bool isReMaterializable(const LiveInterval &li,
                            SmallVectorImpl<LiveInterval*> &SpillIs,
                            bool &isLoad);

    /// isReMaterializable - Returns true if the definition MI of the specified
    /// val# of the specified interval is re-materializable.
    bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
                            MachineInstr *MI);

    /// getRepresentativeReg - Find the largest super register of the specified
    /// physical register.
    unsigned getRepresentativeReg(unsigned Reg) const;

    /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
    /// specified interval that conflict with the specified physical register.
    unsigned getNumConflictsWithPhysReg(const LiveInterval &li,
                                        unsigned PhysReg) const;

    /// processImplicitDefs - Process IMPLICIT_DEF instructions. Add isUndef
    /// marker to implicit_def defs and their uses.
    void processImplicitDefs();

    /// computeNumbering - Compute the index numbering.
    void computeNumbering();

    /// scaleNumbering - Rescale interval numbers to introduce gaps for new
    /// instructions.
    void scaleNumbering(int factor);

    /// intervalIsInOneMBB - Returns true if the specified interval is entirely
    /// within a single basic block.
    bool intervalIsInOneMBB(const LiveInterval &li) const;

  private:
    /// computeIntervals - Compute live intervals.
    void computeIntervals();

    /// handleRegisterDef - update intervals for a register def
    /// (calls handlePhysicalRegisterDef and
    /// handleVirtualRegisterDef)
    void handleRegisterDef(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator MI, unsigned MIIdx,
                           MachineOperand& MO, unsigned MOIdx);

    /// handleVirtualRegisterDef - update intervals for a virtual
    /// register def
    void handleVirtualRegisterDef(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned MIIdx, MachineOperand& MO,
                                  unsigned MOIdx, LiveInterval& interval);

    /// handlePhysicalRegisterDef - update intervals for a physical register
    /// def.
    void handlePhysicalRegisterDef(MachineBasicBlock* mbb,
                                   MachineBasicBlock::iterator mi,
                                   unsigned MIIdx, MachineOperand& MO,
                                   LiveInterval &interval,
                                   MachineInstr *CopyMI);

    /// handleLiveInRegister - Create interval for a livein register.
    void handleLiveInRegister(MachineBasicBlock* mbb,
                              unsigned MIIdx,
                              LiveInterval &interval, bool isAlias = false);

    /// getReMatImplicitUse - If the remat definition MI has one (for now, we
    /// only allow one) virtual register operand, then its uses are implicitly
    /// using the register. Returns the virtual register.
    unsigned getReMatImplicitUse(const LiveInterval &li,
                                 MachineInstr *MI) const;

    /// isValNoAvailableAt - Return true if the val# of the specified interval
    /// which reaches the given instruction also reaches the specified use
    /// index.
    bool isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                            unsigned UseIdx) const;

    /// isReMaterializable - Returns true if the definition MI of the specified
    /// val# of the specified interval is re-materializable. Also returns true
    /// by reference if the def is a load.
    bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
                            MachineInstr *MI,
                            SmallVectorImpl<LiveInterval*> &SpillIs,
                            bool &isLoad);

    /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
    /// slot / to reg or any rematerialized load into the ith operand of the
    /// specified MI. If it is successful, MI is updated with the newly created
    /// MI and returns true.
    bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
                              MachineInstr *DefMI, unsigned InstrIdx,
                              SmallVector<unsigned, 2> &Ops,
                              bool isSS, int Slot, unsigned Reg);

    /// canFoldMemoryOperand - Return true if the specified load / store
    /// folding is possible.
    bool canFoldMemoryOperand(MachineInstr *MI,
                              SmallVector<unsigned, 2> &Ops,
                              bool ReMatLoadSS) const;

    /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified
    /// VNInfo that's after the specified index but is within the basic block.
    bool anyKillInMBBAfterIdx(const LiveInterval &li, const VNInfo *VNI,
                              MachineBasicBlock *MBB, unsigned Idx) const;

    /// hasAllocatableSuperReg - Return true if the specified physical register
    /// has any super register that's allocatable.
    bool hasAllocatableSuperReg(unsigned Reg) const;

    /// SRInfo - Spill / restore info.
    struct SRInfo {
      int index;
      unsigned vreg;
      bool canFold;
      SRInfo(int i, unsigned vr, bool f) : index(i), vreg(vr), canFold(f) {}
    };

    bool alsoFoldARestore(int Id, int index, unsigned vr,
                          BitVector &RestoreMBBs,
                          DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes);
    void eraseRestoreInfo(int Id, int index, unsigned vr,
                          BitVector &RestoreMBBs,
                          DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes);

    /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
    /// spilled and create empty intervals for their uses.
    void handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                              const TargetRegisterClass* rc,
                              std::vector<LiveInterval*> &NewLIs);

    /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
    /// interval on to-be re-materialized operands of MI) with new register.
    void rewriteImplicitOps(const LiveInterval &li,
                            MachineInstr *MI, unsigned NewVReg, VirtRegMap &vrm);

    /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper
    /// functions for addIntervalsForSpills to rewrite uses / defs for the given
    /// live range.
    bool rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
        bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
        MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
        bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
        VirtRegMap &vrm, const TargetRegisterClass* rc,
        SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
        unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
        DenseMap<unsigned, unsigned> &MBBVRegsMap,
        std::vector<LiveInterval*> &NewLIs);

    void rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
        LiveInterval::Ranges::const_iterator &I,
        MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
        bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
        VirtRegMap &vrm, const TargetRegisterClass* rc,
        SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
        BitVector &SpillMBBs,
        DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
        BitVector &RestoreMBBs,
        DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
        DenseMap<unsigned, unsigned> &MBBVRegsMap,
        std::vector<LiveInterval*> &NewLIs);

    static LiveInterval* createInterval(unsigned Reg);

    void printRegName(unsigned reg) const;
  };

} // End llvm namespace

#endif