//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass. Given some numbering of
// each of the machine instructions (in this implementation depth-first order) an
// interval [i, j) is said to be a live interval for register v if there is no
// instruction with number j' > j such that v is live at j' and there is no
// instruction with number i' < i such that v is live at i'. In this
// implementation intervals can have holes, i.e. an interval might look like
// [1,20), [50,65), [1000,1001).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include <cmath>

namespace llvm {

  class LiveVariables;
  class MachineLoopInfo;
  class TargetRegisterInfo;
  class MachineRegisterInfo;
  class TargetInstrInfo;
  class TargetRegisterClass;
  class VirtRegMap;
  typedef std::pair<unsigned, MachineBasicBlock*> IdxMBBPair;

  inline bool operator<(unsigned V, const IdxMBBPair &IM) {
    return V < IM.first;
  }

  inline bool operator<(const IdxMBBPair &IM, unsigned V) {
    return IM.first < V;
  }

  struct Idx2MBBCompare {
    bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
      return LHS.first < RHS.first;
    }
  };

  class LiveIntervals : public MachineFunctionPass {
    MachineFunction* mf_;
    MachineRegisterInfo* mri_;
    const TargetMachine* tm_;
    const TargetRegisterInfo* tri_;
    const TargetInstrInfo* tii_;
    LiveVariables* lv_;

    /// Special pool allocator for VNInfo's (LiveInterval val#).
    ///
    BumpPtrAllocator VNInfoAllocator;

    /// MBB2IdxMap - The indexes of the first and last instructions in the
    /// specified basic block.
    std::vector<std::pair<unsigned, unsigned> > MBB2IdxMap;

    /// Idx2MBBMap - Sorted list of pairs of index of first instruction
    /// and MBB id.
    std::vector<IdxMBBPair> Idx2MBBMap;

    typedef std::map<MachineInstr*, unsigned> Mi2IndexMap;
    Mi2IndexMap mi2iMap_;

    typedef std::vector<MachineInstr*> Index2MiMap;
    Index2MiMap i2miMap_;

    typedef std::map<unsigned, LiveInterval> Reg2IntervalMap;
    Reg2IntervalMap r2iMap_;

    BitVector allocatableRegs_;

    std::vector<MachineInstr*> ClonedMIs;

  public:
    static char ID; // Pass identification, replacement for typeid
    LiveIntervals() : MachineFunctionPass((intptr_t)&ID) {}

    struct InstrSlots {
      enum {
        LOAD = 0,
        USE = 1,
        DEF = 2,
        STORE = 3,
        NUM = 4
      };
    };

    static unsigned getBaseIndex(unsigned index) {
      return index - (index % InstrSlots::NUM);
    }
    static unsigned getBoundaryIndex(unsigned index) {
      return getBaseIndex(index + InstrSlots::NUM - 1);
    }
    static unsigned getLoadIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::LOAD;
    }
    static unsigned getUseIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::USE;
    }
    static unsigned getDefIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::DEF;
    }
    static unsigned getStoreIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::STORE;
    }
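
    // Illustrative example of the slot numbering above (not part of the
    // interface): with InstrSlots::NUM == 4, the instruction whose base
    // index is 8 owns slots 8 (LOAD), 9 (USE), 10 (DEF) and 11 (STORE), so
    //   getBaseIndex(10)    == 8
    //   getBoundaryIndex(9) == 12   (base index of the next instruction)
    //   getDefIndex(8)      == 10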

    static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
      return (isDef + isUse) * powf(10.0F, (float)loopDepth);
    }
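
    // For instance, an instruction that both reads and writes the register
    // at loop depth 2 contributes (1 + 1) * 10^2 = 200 to the weight; the
    // exact scaling is a heuristic, not a contract of this interface.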

    typedef Reg2IntervalMap::iterator iterator;
    typedef Reg2IntervalMap::const_iterator const_iterator;
    const_iterator begin() const { return r2iMap_.begin(); }
    const_iterator end() const { return r2iMap_.end(); }
    iterator begin() { return r2iMap_.begin(); }
    iterator end() { return r2iMap_.end(); }
    unsigned getNumIntervals() const { return r2iMap_.size(); }

    LiveInterval &getInterval(unsigned reg) {
      Reg2IntervalMap::iterator I = r2iMap_.find(reg);
      assert(I != r2iMap_.end() && "Interval does not exist for register");
      return I->second;
    }

    const LiveInterval &getInterval(unsigned reg) const {
      Reg2IntervalMap::const_iterator I = r2iMap_.find(reg);
      assert(I != r2iMap_.end() && "Interval does not exist for register");
      return I->second;
    }

    bool hasInterval(unsigned reg) const {
      return r2iMap_.count(reg);
    }
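
    // Typical (hypothetical) client usage: guard getInterval() with
    // hasInterval(), since getInterval() asserts when no interval exists:
    //   if (LIS.hasInterval(Reg)) {
    //     LiveInterval &LI = LIS.getInterval(Reg);
    //     ...
    //   }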

    /// getMBBStartIdx - Return the base index of the first instruction in the
    /// specified MachineBasicBlock.
    unsigned getMBBStartIdx(MachineBasicBlock *MBB) const {
      return getMBBStartIdx(MBB->getNumber());
    }
    unsigned getMBBStartIdx(unsigned MBBNo) const {
      assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
      return MBB2IdxMap[MBBNo].first;
    }

    /// getMBBEndIdx - Return the store index of the last instruction in the
    /// specified MachineBasicBlock.
    unsigned getMBBEndIdx(MachineBasicBlock *MBB) const {
      return getMBBEndIdx(MBB->getNumber());
    }
    unsigned getMBBEndIdx(unsigned MBBNo) const {
      assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
      return MBB2IdxMap[MBBNo].second;
    }
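
    // Sketch of a common check built from these two accessors (assuming Idx
    // is any instruction index and MBB has already been numbered):
    //   bool InBlock = getMBBStartIdx(MBB) <= Idx && Idx <= getMBBEndIdx(MBB);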

    /// getMBBFromIndex - given an index in any instruction of an
    /// MBB return a pointer to the MBB
    MachineBasicBlock* getMBBFromIndex(unsigned index) const {
      std::vector<IdxMBBPair>::const_iterator I =
        std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), index);
      // Take the pair containing the index
      std::vector<IdxMBBPair>::const_iterator J =
        ((I != Idx2MBBMap.end() && I->first > index) ||
         (I == Idx2MBBMap.end() && Idx2MBBMap.size()>0)) ? (I-1): I;

      assert(J != Idx2MBBMap.end() && J->first < index+1 &&
             index <= getMBBEndIdx(J->second) &&
             "index does not correspond to an MBB");
      return J->second;
    }

    /// getInstructionIndex - returns the base index of instr
    unsigned getInstructionIndex(MachineInstr* instr) const {
      Mi2IndexMap::const_iterator it = mi2iMap_.find(instr);
      assert(it != mi2iMap_.end() && "Invalid instruction!");
      return it->second;
    }

    /// getInstructionFromIndex - given an index in any slot of an
    /// instruction return a pointer to the instruction
    MachineInstr* getInstructionFromIndex(unsigned index) const {
      index /= InstrSlots::NUM; // convert index to vector index
      assert(index < i2miMap_.size() &&
             "index does not correspond to an instruction");
      return i2miMap_[index];
    }
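
    // The two queries above are inverses on mapped instructions; hypothetical
    // client code (LIS is a LiveIntervals instance, MI a mapped instruction):
    //   unsigned Idx = LIS.getInstructionIndex(MI);
    //   assert(LIS.getInstructionFromIndex(Idx) == MI);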

    /// conflictsWithPhysRegDef - Returns true if the specified register
    /// is defined during the duration of the specified interval.
    bool conflictsWithPhysRegDef(const LiveInterval &li, VirtRegMap &vrm,
                                 unsigned reg);

    /// findLiveInMBBs - Given a live range, if the value of the range
    /// is live in any MBB, returns true as well as the list of basic blocks
    /// where the value is live.
    bool findLiveInMBBs(const LiveRange &LR,
                        SmallVectorImpl<MachineBasicBlock*> &MBBs) const;

    // Interval creation

    LiveInterval &getOrCreateInterval(unsigned reg) {
      Reg2IntervalMap::iterator I = r2iMap_.find(reg);
      if (I == r2iMap_.end())
        I = r2iMap_.insert(I, std::make_pair(reg, createInterval(reg)));
      return I->second;
    }

    // Interval removal

    void removeInterval(unsigned Reg) {
      r2iMap_.erase(Reg);
    }

    /// isRemoved - returns true if the specified machine instr has been
    /// removed.
    bool isRemoved(MachineInstr* instr) const {
      return !mi2iMap_.count(instr);
    }

    /// RemoveMachineInstrFromMaps - This marks the specified machine instr as
    /// deleted.
    void RemoveMachineInstrFromMaps(MachineInstr *MI) {
      // remove index -> MachineInstr and
      // MachineInstr -> index mappings
      Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI);
      if (mi2i != mi2iMap_.end()) {
        i2miMap_[mi2i->second/InstrSlots::NUM] = 0;
        mi2iMap_.erase(mi2i);
      }
    }

    /// ReplaceMachineInstrInMaps - Replace a machine instr with a new one in
    /// the maps used by the register allocator.
    void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
      Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI);
      if (mi2i == mi2iMap_.end())
        return;
      i2miMap_[mi2i->second/InstrSlots::NUM] = NewMI;
      Mi2IndexMap::iterator it = mi2iMap_.find(MI);
      assert(it != mi2iMap_.end() && "Invalid instruction!");
      unsigned Index = it->second;
      mi2iMap_.erase(it);
      mi2iMap_[NewMI] = Index;
    }
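
    // Hypothetical example of the intended use: after rewriting MI into a
    // folded form, keep the index maps consistent before erasing the old
    // instruction:
    //   MachineInstr *FoldedMI = /* result of folding MI */;
    //   LIS.ReplaceMachineInstrInMaps(MI, FoldedMI);
    //   MI->eraseFromParent();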

    BumpPtrAllocator& getVNInfoAllocator() { return VNInfoAllocator; }

    /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
    /// copy field and returns the source register that defines it.
    unsigned getVNInfoSourceReg(const VNInfo *VNI) const;

    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
    virtual void releaseMemory();

    /// runOnMachineFunction - pass entry point
    virtual bool runOnMachineFunction(MachineFunction&);

    /// print - Implement the dump method.
    virtual void print(std::ostream &O, const Module* = 0) const;
    void print(std::ostream *O, const Module* M = 0) const {
      if (O) print(*O, M);
    }

    /// addIntervalsForSpills - Create new intervals for spilled defs / uses of
    /// the given interval.
    std::vector<LiveInterval*>
    addIntervalsForSpills(const LiveInterval& i,
                          const MachineLoopInfo *loopInfo, VirtRegMap& vrm);

    /// isReMaterializable - Returns true if every definition of MI of every
    /// val# of the specified interval is re-materializable. Also returns true
    /// by reference if all of the defs are load instructions.
    bool isReMaterializable(const LiveInterval &li, bool &isLoad);

  private:
    /// computeIntervals - Compute live intervals.
    void computeIntervals();

    /// handleRegisterDef - update intervals for a register def
    /// (calls handlePhysicalRegisterDef and
    /// handleVirtualRegisterDef)
    void handleRegisterDef(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator MI, unsigned MIIdx,
                           unsigned reg);

    /// handleVirtualRegisterDef - update intervals for a virtual
    /// register def
    void handleVirtualRegisterDef(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned MIIdx,
                                  LiveInterval& interval);

    /// handlePhysicalRegisterDef - update intervals for a physical register
    /// def.
    void handlePhysicalRegisterDef(MachineBasicBlock* mbb,
                                   MachineBasicBlock::iterator mi,
                                   unsigned MIIdx,
                                   LiveInterval &interval,
                                   MachineInstr *CopyMI);

    /// handleLiveInRegister - Create interval for a livein register.
    void handleLiveInRegister(MachineBasicBlock* mbb,
                              unsigned MIIdx,
                              LiveInterval &interval, bool isAlias = false);

    /// getReMatImplicitUse - If the remat definition MI has one (for now, we
    /// only allow one) virtual register operand, then its uses are implicitly
    /// using the register. Returns the virtual register.
    unsigned getReMatImplicitUse(const LiveInterval &li,
                                 MachineInstr *MI) const;

    /// isValNoAvailableAt - Return true if the val# of the specified interval
    /// which reaches the given instruction also reaches the specified use
    /// index.
    bool isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                            unsigned UseIdx) const;

    /// isReMaterializable - Returns true if the definition MI of the specified
    /// val# of the specified interval is re-materializable. Also returns true
    /// by reference if the def is a load.
    bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
                            MachineInstr *MI, bool &isLoad);

    /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
    /// slot / to reg or any rematerialized load into ith operand of specified
    /// MI. If it is successful, MI is updated with the newly created MI and
    /// returns true.
    bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
                              MachineInstr *DefMI, unsigned InstrIdx,
                              SmallVector<unsigned, 2> &Ops,
                              bool isSS, int Slot, unsigned Reg);

    /// canFoldMemoryOperand - Return true if the specified load / store
    /// folding is possible.
    bool canFoldMemoryOperand(MachineInstr *MI,
                              SmallVector<unsigned, 2> &Ops,
                              bool ReMatLoadSS) const;

    /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified
    /// VNInfo that's after the specified index but is within the basic block.
    bool anyKillInMBBAfterIdx(const LiveInterval &li, const VNInfo *VNI,
                              MachineBasicBlock *MBB, unsigned Idx) const;

    /// intervalIsInOneMBB - Returns true if the specified interval is entirely
    /// within a single basic block.
    bool intervalIsInOneMBB(const LiveInterval &li) const;

    /// SRInfo - Spill / restore info.
    struct SRInfo {
      int index;
      unsigned vreg;
      bool canFold;
      SRInfo(int i, unsigned vr, bool f) : index(i), vreg(vr), canFold(f) {}
    };

    bool alsoFoldARestore(int Id, int index, unsigned vr,
                          BitVector &RestoreMBBs,
                          std::map<unsigned,std::vector<SRInfo> >&RestoreIdxes);
    void eraseRestoreInfo(int Id, int index, unsigned vr,
                          BitVector &RestoreMBBs,
                          std::map<unsigned,std::vector<SRInfo> >&RestoreIdxes);

    /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
    /// interval on to-be re-materialized operands of MI) with new register.
    void rewriteImplicitOps(const LiveInterval &li,
                            MachineInstr *MI, unsigned NewVReg, VirtRegMap &vrm);

    /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper
    /// functions for addIntervalsForSpills to rewrite uses / defs for the given
    /// live range.
    bool rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
        bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
        MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
        bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
        VirtRegMap &vrm, const TargetRegisterClass* rc,
        SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
        unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
        std::map<unsigned,unsigned> &MBBVRegsMap,
        std::vector<LiveInterval*> &NewLIs);

    void rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
        LiveInterval::Ranges::const_iterator &I,
        MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
        bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
        VirtRegMap &vrm, const TargetRegisterClass* rc,
        SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
        BitVector &SpillMBBs,
        std::map<unsigned,std::vector<SRInfo> > &SpillIdxes,
        BitVector &RestoreMBBs,
        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes,
        std::map<unsigned,unsigned> &MBBVRegsMap,
        std::vector<LiveInterval*> &NewLIs);

    static LiveInterval createInterval(unsigned Reg);

    void printRegName(unsigned reg) const;
  };

} // End llvm namespace

#endif