Replaces uses of unsigned for indexes in LiveInterval and VNInfo with
a new class, MachineInstrIndex, which hides arithmetic details from most clients. This is a step towards allowing the register allocator to update/insert code during allocation. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@81040 91177308-0d34-0410-b5e6-96231b3b80d8
Parent: 5684229a45
Commit: 8651125d28
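In practice the change means call sites stop doing the four-slot arithmetic (LOAD, USE, DEF, STORE per instruction) by hand on raw unsigned values and go through the new wrapper instead. A standalone before/after sketch of the idea, using a simplified hypothetical Index class rather than the patch's actual MachineInstrIndex:

// ---- Illustrative sketch, not part of this commit -------------------------
#include <cassert>

// Old style: every caller knows there are 4 slots per instruction and that
// DEF is slot 2.
unsigned oldGetDefIndex(unsigned idx) { return (idx / 4) * 4 + 2; }

// New style: an opaque wrapper owns that arithmetic.
class Index {
  unsigned idx;                                  // encoded as instr * NUM + slot
public:
  enum Slot { LOAD, USE, DEF, STORE, NUM };
  explicit Index(unsigned i) : idx(i) {}
  Index atSlot(Slot s) const { return Index((idx / NUM) * NUM + s); }
  bool isDef() const { return idx % NUM == DEF; }
  unsigned raw() const { return idx; }
};

int main() {
  unsigned raw = 7 * 4 + 1;                      // instruction 7, USE slot
  assert(oldGetDefIndex(raw) == 30);             // arithmetic at the call site
  Index i(raw);
  assert(i.atSlot(Index::DEF).raw() == 30);      // arithmetic hidden in the class
  assert(i.atSlot(Index::DEF).isDef());
  return 0;
}
// ----------------------------------------------------------------------------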
@@ -21,6 +21,7 @@
#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
#define LLVM_CODEGEN_LIVEINTERVAL_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AlignOf.h"
@@ -32,6 +33,217 @@ namespace llvm {
  class MachineRegisterInfo;
  class TargetRegisterInfo;
  class raw_ostream;

  /// MachineInstrIndex - An opaque wrapper around machine indexes.
  class MachineInstrIndex {
    friend class VNInfo;
    friend class LiveInterval;
    friend class LiveIntervals;
    friend struct DenseMapInfo<MachineInstrIndex>;

  public:

    enum Slot { LOAD, USE, DEF, STORE, NUM };

  private:

    unsigned index;

    static const unsigned PHI_BIT = 1 << 31;

  public:

    /// Construct a default MachineInstrIndex pointing to a reserved index.
    MachineInstrIndex() : index(0) {}

    /// Construct an index from the given index, pointing to the given slot.
    MachineInstrIndex(MachineInstrIndex m, Slot s)
      : index((m.index / NUM) * NUM + s) {}

    /// Print this index to the given raw_ostream.
    void print(raw_ostream &os) const;

    /// Print this index to the given std::ostream.
    void print(std::ostream &os) const;

    /// Compare two MachineInstrIndex objects for equality.
    bool operator==(MachineInstrIndex other) const {
      return ((index & ~PHI_BIT) == (other.index & ~PHI_BIT));
    }
    /// Compare two MachineInstrIndex objects for inequality.
    bool operator!=(MachineInstrIndex other) const {
      return ((index & ~PHI_BIT) != (other.index & ~PHI_BIT));
    }

    /// Compare two MachineInstrIndex objects. Return true if the first index
    /// is strictly lower than the second.
    bool operator<(MachineInstrIndex other) const {
      return ((index & ~PHI_BIT) < (other.index & ~PHI_BIT));
    }
    /// Compare two MachineInstrIndex objects. Return true if the first index
    /// is lower than, or equal to, the second.
    bool operator<=(MachineInstrIndex other) const {
      return ((index & ~PHI_BIT) <= (other.index & ~PHI_BIT));
    }

    /// Compare two MachineInstrIndex objects. Return true if the first index
    /// is greater than the second.
    bool operator>(MachineInstrIndex other) const {
      return ((index & ~PHI_BIT) > (other.index & ~PHI_BIT));
    }

    /// Compare two MachineInstrIndex objects. Return true if the first index
    /// is greater than, or equal to, the second.
    bool operator>=(MachineInstrIndex other) const {
      return ((index & ~PHI_BIT) >= (other.index & ~PHI_BIT));
    }

    /// Returns true if this index represents a load.
    bool isLoad() const {
      return ((index % NUM) == LOAD);
    }

    /// Returns true if this index represents a use.
    bool isUse() const {
      return ((index % NUM) == USE);
    }

    /// Returns true if this index represents a def.
    bool isDef() const {
      return ((index % NUM) == DEF);
    }

    /// Returns true if this index represents a store.
    bool isStore() const {
      return ((index % NUM) == STORE);
    }

    /// Returns the slot for this MachineInstrIndex.
    Slot getSlot() const {
      return static_cast<Slot>(index % NUM);
    }

    /// Returns true if this index represents a non-PHI use/def.
    bool isNonPHIIndex() const {
      return ((index & PHI_BIT) == 0);
    }

    /// Returns true if this index represents a PHI use/def.
    bool isPHIIndex() const {
      return ((index & PHI_BIT) == PHI_BIT);
    }

  private:

    /// Construct an index from the given index, with its PHI kill marker set.
    MachineInstrIndex(bool phi, MachineInstrIndex o) : index(o.index) {
      if (phi)
        index |= PHI_BIT;
      else
        index &= ~PHI_BIT;
    }

    explicit MachineInstrIndex(unsigned idx)
      : index(idx & ~PHI_BIT) {}

    MachineInstrIndex(bool phi, unsigned idx)
      : index(idx & ~PHI_BIT) {
      if (phi)
        index |= PHI_BIT;
    }

    MachineInstrIndex(bool phi, unsigned idx, Slot slot)
      : index(((idx / NUM) * NUM + slot) & ~PHI_BIT) {
      if (phi)
        index |= PHI_BIT;
    }

    MachineInstrIndex nextSlot() const {
      assert((index & PHI_BIT) == ((index + 1) & PHI_BIT) &&
             "Index out of bounds.");
      return MachineInstrIndex(index + 1);
    }

    MachineInstrIndex nextIndex() const {
      assert((index & PHI_BIT) == ((index + NUM) & PHI_BIT) &&
             "Index out of bounds.");
      return MachineInstrIndex(index + NUM);
    }

    MachineInstrIndex prevSlot() const {
      assert((index & PHI_BIT) == ((index - 1) & PHI_BIT) &&
             "Index out of bounds.");
      return MachineInstrIndex(index - 1);
    }

    MachineInstrIndex prevIndex() const {
      assert((index & PHI_BIT) == ((index - NUM) & PHI_BIT) &&
             "Index out of bounds.");
      return MachineInstrIndex(index - NUM);
    }

    int distance(MachineInstrIndex other) const {
      return (other.index & ~PHI_BIT) - (index & ~PHI_BIT);
    }

    /// Returns an unsigned number suitable as an index into a
    /// vector over all instructions.
    unsigned getVecIndex() const {
      return (index & ~PHI_BIT) / NUM;
    }

    /// Scale this index by the given factor.
    MachineInstrIndex scale(unsigned factor) const {
      unsigned i = (index & ~PHI_BIT) / NUM,
               o = (index % ~PHI_BIT) % NUM;
      assert(index <= (~0U & ~PHI_BIT) / (factor * NUM) &&
             "Rescaled interval would overflow");
      return MachineInstrIndex(i * NUM * factor, o);
    }

    static MachineInstrIndex emptyKey() {
      return MachineInstrIndex(true, 0x7fffffff);
    }

    static MachineInstrIndex tombstoneKey() {
      return MachineInstrIndex(true, 0x7ffffffe);
    }

    static unsigned getHashValue(const MachineInstrIndex &v) {
      return v.index * 37;
    }

  };

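To make the encoding above concrete: an index packs an instruction number and a slot as instr * NUM + slot, and the top bit is reserved as a PHI marker that every comparison operator masks out. A minimal standalone model of that behaviour (my own simplification, not the class itself):

// ---- Illustrative sketch, not part of this commit -------------------------
#include <cassert>

static const unsigned PHI_BIT = 1u << 31;
enum Slot { LOAD, USE, DEF, STORE, NUM };

bool equalIgnoringPhi(unsigned a, unsigned b) {
  return (a & ~PHI_BIT) == (b & ~PHI_BIT);       // mirrors operator== above
}

int main() {
  unsigned use = 5 * NUM + USE;                  // instruction 5, USE slot: 21
  unsigned phiUse = use | PHI_BIT;               // same position, PHI marker set
  assert(equalIgnoringPhi(use, phiUse));         // ordering ignores the PHI bit
  assert((phiUse & PHI_BIT) != 0);               // ...but isPHIIndex() would see it
  assert(use % NUM == USE && use / NUM == 5);    // slot and instruction recoverable
  return 0;
}
// ----------------------------------------------------------------------------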
  inline raw_ostream& operator<<(raw_ostream &os, MachineInstrIndex mi) {
    mi.print(os);
    return os;
  }

  inline std::ostream& operator<<(std::ostream &os, MachineInstrIndex mi) {
    mi.print(os);
    return os;
  }

  /// Densemap specialization for MachineInstrIndex.
  template <>
  struct DenseMapInfo<MachineInstrIndex> {
    static inline MachineInstrIndex getEmptyKey() {
      return MachineInstrIndex::emptyKey();
    }
    static inline MachineInstrIndex getTombstoneKey() {
      return MachineInstrIndex::tombstoneKey();
    }
    static inline unsigned getHashValue(const MachineInstrIndex &v) {
      return MachineInstrIndex::getHashValue(v);
    }
    static inline bool isEqual(const MachineInstrIndex &LHS,
                               const MachineInstrIndex &RHS) {
      return (LHS == RHS);
    }
    static inline bool isPod() { return true; }
  };

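The DenseMapInfo specialization is what lets MachineInstrIndex serve directly as a DenseMap key. A sketch of the kind of client code it enables; this assumes an LLVM tree of roughly this vintage on the include path and is not part of the commit:

// ---- Illustrative sketch, not part of this commit -------------------------
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/LiveInterval.h"
using namespace llvm;

// Keys hash and compare through the DenseMapInfo<MachineInstrIndex>
// specialization shown above.
void rememberInstr(DenseMap<MachineInstrIndex, MachineInstr*> &map,
                   MachineInstrIndex idx, MachineInstr *mi) {
  map[idx] = mi;
}

MachineInstr *lookupInstr(DenseMap<MachineInstrIndex, MachineInstr*> &map,
                          MachineInstrIndex idx) {
  DenseMap<MachineInstrIndex, MachineInstr*>::iterator it = map.find(idx);
  return it == map.end() ? 0 : it->second;
}
// ----------------------------------------------------------------------------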
  /// VNInfo - Value Number Information.
  /// This class holds information about a machine level values, including
@@ -65,36 +277,24 @@ namespace llvm {
    } cr;

  public:
    /// Holds information about individual kills.
    struct KillInfo {
      bool isPHIKill : 1;
      unsigned killIdx : 31;

      KillInfo(bool isPHIKill, unsigned killIdx)
        : isPHIKill(isPHIKill), killIdx(killIdx) {

        assert(killIdx != 0 && "Zero kill indices are no longer permitted.");
      }

    };

    typedef SmallVector<KillInfo, 4> KillSet;
    typedef SmallVector<MachineInstrIndex, 4> KillSet;

    /// The ID number of this value.
    unsigned id;

    /// The index of the defining instruction (if isDefAccurate() returns true).
    unsigned def;
    MachineInstrIndex def;

    KillSet kills;

    VNInfo()
      : flags(IS_UNUSED), id(~1U), def(0) { cr.copy = 0; }
      : flags(IS_UNUSED), id(~1U) { cr.copy = 0; }

    /// VNInfo constructor.
    /// d is presumed to point to the actual defining instr. If it doesn't
    /// setIsDefAccurate(false) should be called after construction.
    VNInfo(unsigned i, unsigned d, MachineInstr *c)
    VNInfo(unsigned i, MachineInstrIndex d, MachineInstr *c)
      : flags(IS_DEF_ACCURATE), id(i), def(d) { cr.copy = c; }

    /// VNInfo construtor, copies values from orig, except for the value number.
@@ -134,6 +334,7 @@ namespace llvm {

    /// Returns true if one or more kills are PHI nodes.
    bool hasPHIKill() const { return flags & HAS_PHI_KILL; }
    /// Set the PHI kill flag on this value.
    void setHasPHIKill(bool hasKill) {
      if (hasKill)
        flags |= HAS_PHI_KILL;
@@ -144,6 +345,7 @@ namespace llvm {
    /// Returns true if this value is re-defined by an early clobber somewhere
    /// during the live range.
    bool hasRedefByEC() const { return flags & REDEF_BY_EC; }
    /// Set the "redef by early clobber" flag on this value.
    void setHasRedefByEC(bool hasRedef) {
      if (hasRedef)
        flags |= REDEF_BY_EC;
@@ -154,6 +356,7 @@ namespace llvm {
    /// Returns true if this value is defined by a PHI instruction (or was,
    /// PHI instrucions may have been eliminated).
    bool isPHIDef() const { return flags & IS_PHI_DEF; }
    /// Set the "phi def" flag on this value.
    void setIsPHIDef(bool phiDef) {
      if (phiDef)
        flags |= IS_PHI_DEF;
@@ -163,6 +366,7 @@ namespace llvm {

    /// Returns true if this value is unused.
    bool isUnused() const { return flags & IS_UNUSED; }
    /// Set the "is unused" flag on this value.
    void setIsUnused(bool unused) {
      if (unused)
        flags |= IS_UNUSED;
@@ -172,6 +376,7 @@ namespace llvm {

    /// Returns true if the def is accurate.
    bool isDefAccurate() const { return flags & IS_DEF_ACCURATE; }
    /// Set the "is def accurate" flag on this value.
    void setIsDefAccurate(bool defAccurate) {
      if (defAccurate)
        flags |= IS_DEF_ACCURATE;
@@ -179,38 +384,74 @@ namespace llvm {
      flags &= ~IS_DEF_ACCURATE;
    }

    /// Returns true if the given index is a kill of this value.
    bool isKill(MachineInstrIndex k) const {
      KillSet::const_iterator
        i = std::lower_bound(kills.begin(), kills.end(), k);
      return (i != kills.end() && *i == k);
    }

    /// addKill - Add a kill instruction index to the specified value
    /// number.
    void addKill(MachineInstrIndex k) {
      if (kills.empty()) {
        kills.push_back(k);
      } else {
        KillSet::iterator
          i = std::lower_bound(kills.begin(), kills.end(), k);
        kills.insert(i, k);
      }
    }

    /// Remove the specified kill index from this value's kills list.
    /// Returns true if the value was present, otherwise returns false.
    bool removeKill(MachineInstrIndex k) {
      KillSet::iterator i = std::lower_bound(kills.begin(), kills.end(), k);
      if (i != kills.end() && *i == k) {
        kills.erase(i);
        return true;
      }
      return false;
    }

    /// Remove all kills in the range [s, e).
    void removeKills(MachineInstrIndex s, MachineInstrIndex e) {
      KillSet::iterator
        si = std::lower_bound(kills.begin(), kills.end(), s),
        se = std::upper_bound(kills.begin(), kills.end(), e);

      kills.erase(si, se);
    }

  };

  inline bool operator<(const VNInfo::KillInfo &k1, const VNInfo::KillInfo &k2){
    return k1.killIdx < k2.killIdx;
  }

  inline bool operator<(const VNInfo::KillInfo &k, unsigned idx) {
    return k.killIdx < idx;
  }

  inline bool operator<(unsigned idx, const VNInfo::KillInfo &k) {
    return idx < k.killIdx;
  }

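VNInfo's kill list is an ordinary vector kept sorted, so membership tests, inserts and range removals are all std::lower_bound/std::upper_bound plus an insert or erase. A self-contained model of that pattern with plain unsigned keys standing in for MachineInstrIndex:

// ---- Illustrative sketch, not part of this commit -------------------------
#include <algorithm>
#include <cassert>
#include <vector>

typedef std::vector<unsigned> KillSet;           // stand-in for the SmallVector

void addKill(KillSet &kills, unsigned k) {
  kills.insert(std::lower_bound(kills.begin(), kills.end(), k), k);
}

bool isKill(const KillSet &kills, unsigned k) {
  KillSet::const_iterator i = std::lower_bound(kills.begin(), kills.end(), k);
  return i != kills.end() && *i == k;
}

void removeKills(KillSet &kills, unsigned s, unsigned e) {  // removes kills in [s, e)
  kills.erase(std::lower_bound(kills.begin(), kills.end(), s),
              std::lower_bound(kills.begin(), kills.end(), e));
}

int main() {
  KillSet kills;
  addKill(kills, 30); addKill(kills, 10); addKill(kills, 22);
  assert(isKill(kills, 22) && !isKill(kills, 11));
  removeKills(kills, 10, 23);                    // drops 10 and 22, keeps 30
  assert(kills.size() == 1 && kills[0] == 30);
  return 0;
}
// ----------------------------------------------------------------------------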
  /// LiveRange structure - This represents a simple register range in the
  /// program, with an inclusive start point and an exclusive end point.
  /// These ranges are rendered as [start,end).
  struct LiveRange {
    unsigned start;             // Start point of the interval (inclusive)
    unsigned end;               // End point of the interval (exclusive)
    MachineInstrIndex start;    // Start point of the interval (inclusive)
    MachineInstrIndex end;      // End point of the interval (exclusive)
    VNInfo *valno;              // identifier for the value contained in this interval.

    LiveRange(unsigned S, unsigned E, VNInfo *V) : start(S), end(E), valno(V) {
    LiveRange(MachineInstrIndex S, MachineInstrIndex E, VNInfo *V)
      : start(S), end(E), valno(V) {

      assert(S < E && "Cannot create empty or backwards range");
    }

    /// contains - Return true if the index is covered by this range.
    ///
    bool contains(unsigned I) const {
    bool contains(MachineInstrIndex I) const {
      return start <= I && I < end;
    }

    /// containsRange - Return true if the given range, [S, E), is covered by
    /// this range.
    bool containsRange(MachineInstrIndex S, MachineInstrIndex E) const {
      assert((S < E) && "Backwards interval?");
      return (start <= S && S < end) && (start < E && E <= end);
    }

    bool operator<(const LiveRange &LR) const {
      return start < LR.start || (start == LR.start && end < LR.end);
    }
@@ -228,11 +469,11 @@ namespace llvm {
  raw_ostream& operator<<(raw_ostream& os, const LiveRange &LR);

  inline bool operator<(unsigned V, const LiveRange &LR) {
  inline bool operator<(MachineInstrIndex V, const LiveRange &LR) {
    return V < LR.start;
  }

  inline bool operator<(const LiveRange &LR, unsigned V) {
  inline bool operator<(const LiveRange &LR, MachineInstrIndex V) {
    return LR.start < V;
  }

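LiveRange treats every span as half-open, [start, end): the start index is covered, the end index is not, and containsRange additionally requires the queried end to fit inside the range. A tiny standalone check of those conventions:

// ---- Illustrative sketch, not part of this commit -------------------------
#include <cassert>

struct Range { unsigned start, end; };           // simplified stand-in

bool contains(const Range &r, unsigned i) { return r.start <= i && i < r.end; }

bool containsRange(const Range &r, unsigned s, unsigned e) {  // [s, e), e exclusive
  return (r.start <= s && s < r.end) && (r.start < e && e <= r.end);
}

int main() {
  Range r = { 4, 12 };
  assert(contains(r, 4) && contains(r, 11) && !contains(r, 12));  // end is exclusive
  assert(containsRange(r, 4, 12) && !containsRange(r, 4, 13));
  return 0;
}
// ----------------------------------------------------------------------------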
@@ -260,14 +501,6 @@ namespace llvm {
      NUM = 4
    };

    static unsigned scale(unsigned slot, unsigned factor) {
      unsigned index = slot / NUM,
               offset = slot % NUM;
      assert(index <= ~0U / (factor * NUM) &&
             "Rescaled interval would overflow");
      return index * NUM * factor + offset;
    }

  };

  LiveInterval(unsigned Reg, float Weight, bool IsSS = false)
@@ -297,8 +530,8 @@ namespace llvm {
    /// end of the interval. If no LiveRange contains this position, but the
    /// position is in a hole, this method returns an iterator pointing the the
    /// LiveRange immediately after the hole.
    iterator advanceTo(iterator I, unsigned Pos) {
      if (Pos >= endNumber())
    iterator advanceTo(iterator I, MachineInstrIndex Pos) {
      if (Pos >= endIndex())
        return end();
      while (I->end <= Pos) ++I;
      return I;
@@ -344,15 +577,12 @@ namespace llvm {

    /// getNextValue - Create a new value number and return it. MIIdx specifies
    /// the instruction that defines the value number.
    VNInfo *getNextValue(unsigned MIIdx, MachineInstr *CopyMI,
    VNInfo *getNextValue(MachineInstrIndex def, MachineInstr *CopyMI,
                         bool isDefAccurate, BumpPtrAllocator &VNInfoAllocator){

      assert(MIIdx != ~0u && MIIdx != ~1u &&
             "PHI def / unused flags should now be passed explicitly.");
      VNInfo *VNI =
        static_cast<VNInfo*>(VNInfoAllocator.Allocate((unsigned)sizeof(VNInfo),
                                                      alignof<VNInfo>()));
      new (VNI) VNInfo((unsigned)valnos.size(), MIIdx, CopyMI);
      new (VNI) VNInfo((unsigned)valnos.size(), def, CopyMI);
      VNI->setIsDefAccurate(isDefAccurate);
      valnos.push_back(VNI);
      return VNI;
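getNextValue() above allocates VNInfo objects out of a BumpPtrAllocator and then constructs them with placement new instead of calling operator new per value. A stripped-down model of that allocation pattern, with malloc standing in for the bump allocator and ValNo standing in for VNInfo:

// ---- Illustrative sketch, not part of this commit -------------------------
#include <cstddef>
#include <cstdlib>
#include <new>

struct Pool {                                    // real code: bump-pointer arena
  void *Allocate(std::size_t size, std::size_t /*align*/) {
    return std::malloc(size);
  }
};

struct ValNo {
  unsigned id;
  unsigned def;
  ValNo(unsigned i, unsigned d) : id(i), def(d) {}
};

ValNo *getNextValue(Pool &pool, unsigned nextId, unsigned defIdx) {
  void *mem = pool.Allocate(sizeof(ValNo), sizeof(void*));
  return new (mem) ValNo(nextId, defIdx);        // placement new: construct in place,
}                                                // freed when the pool is torn down

int main() {
  Pool pool;
  ValNo *v = getNextValue(pool, 0, 42);
  return (v->id == 0 && v->def == 42) ? 0 : 1;
}
// ----------------------------------------------------------------------------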
@@ -372,50 +602,52 @@ namespace llvm {
      return VNI;
    }

    /// addKill - Add a kill instruction index to the specified value
    /// number.
    static void addKill(VNInfo *VNI, unsigned KillIdx, bool phiKill) {
      VNInfo::KillSet &kills = VNI->kills;
      VNInfo::KillInfo newKill(phiKill, KillIdx);
      if (kills.empty()) {
        kills.push_back(newKill);
      } else {
        VNInfo::KillSet::iterator
          I = std::lower_bound(kills.begin(), kills.end(), newKill);
        kills.insert(I, newKill);
      }
    }

    /// addKills - Add a number of kills into the VNInfo kill vector. If this
    /// interval is live at a kill point, then the kill is not added.
    void addKills(VNInfo *VNI, const VNInfo::KillSet &kills) {
      for (unsigned i = 0, e = static_cast<unsigned>(kills.size());
           i != e; ++i) {
        const VNInfo::KillInfo &Kill = kills[i];
        if (!liveBeforeAndAt(Kill.killIdx)) {
          VNInfo::KillSet::iterator
            I = std::lower_bound(VNI->kills.begin(), VNI->kills.end(), Kill);
          VNI->kills.insert(I, Kill);
        if (!liveBeforeAndAt(kills[i])) {
          VNI->addKill(kills[i]);
        }
      }
    }

    /* REMOVE_ME
    /// addKill - Add a kill instruction index to the specified value
    /// number.
    static void addKill(VNInfo *VNI, MachineInstrIndex killIdx) {
      assert(killIdx.isUse() && "Kill must be a use.");
      if (VNI->kills.empty()) {
        VNI->kills.push_back(killIdx);
      } else {
        VNInfo::KillSet::iterator
          I = std::lower_bound(VNI->kills.begin(), VNI->kills.end(), killIdx);
        VNI->kills.insert(I, killIdx);
      }
    }

    /// removeKill - Remove the specified kill from the list of kills of
    /// the specified val#.
    static bool removeKill(VNInfo *VNI, unsigned KillIdx) {
      VNInfo::KillSet &kills = VNI->kills;
    static bool removeKill(VNInfo *VNI, MachineInstrIndex Kill) {

      VNInfo::KillSet::iterator
        I = std::lower_bound(kills.begin(), kills.end(), KillIdx);
      if (I != kills.end() && I->killIdx == KillIdx) {
        kills.erase(I);
        I = std::lower_bound(VNI->kills.begin(), VNI->kills.end(), Kill);
      if (I != VNI->kills.end() && (*I == Kill)) {
        VNI->kills.erase(I);
        return true;
      }
      return false;

    }

    /// removeKills - Remove all the kills in specified range
    /// [Start, End] of the specified val#.
    static void removeKills(VNInfo *VNI, unsigned Start, unsigned End) {
    static void removeKills(VNInfo *VNI, MachineInstrIndex Start,
                            MachineInstrIndex End) {

      VNInfo::KillSet &kills = VNI->kills;

      VNInfo::KillSet::iterator
@@ -424,15 +656,16 @@ namespace llvm {
        E = std::upper_bound(kills.begin(), kills.end(), End);
      kills.erase(I, E);
    }

    /// isKill - Return true if the specified index is a kill of the
    /// specified val#.
    static bool isKill(const VNInfo *VNI, unsigned KillIdx) {
      const VNInfo::KillSet &kills = VNI->kills;
    static bool isKill(const VNInfo *VNI, MachineInstrIndex Kill) {
      VNInfo::KillSet::const_iterator
        I = std::lower_bound(kills.begin(), kills.end(), KillIdx);
      return I != kills.end() && I->killIdx == KillIdx;
        I = std::lower_bound(VNI->kills.begin(), VNI->kills.end(), Kill);
      return I != VNI->kills.end() && (*I == Kill);
    }
    */

    /// isOnlyLROfValNo - Return true if the specified live range is the only
    /// one defined by its val#.
@@ -460,7 +693,8 @@ namespace llvm {

    /// MergeInClobberRange - Same as MergeInClobberRanges except it merge in a
    /// single LiveRange only.
    void MergeInClobberRange(unsigned Start, unsigned End,
    void MergeInClobberRange(MachineInstrIndex Start,
                             MachineInstrIndex End,
                             BumpPtrAllocator &VNInfoAllocator);

    /// MergeValueInAsValue - Merge all of the live ranges of a specific val#
@@ -485,58 +719,62 @@ namespace llvm {

    bool empty() const { return ranges.empty(); }

    /// beginNumber - Return the lowest numbered slot covered by interval.
    unsigned beginNumber() const {
    /// beginIndex - Return the lowest numbered slot covered by interval.
    MachineInstrIndex beginIndex() const {
      if (empty())
        return 0;
        return MachineInstrIndex();
      return ranges.front().start;
    }

    /// endNumber - return the maximum point of the interval of the whole,
    /// exclusive.
    unsigned endNumber() const {
    MachineInstrIndex endIndex() const {
      if (empty())
        return 0;
        return MachineInstrIndex();
      return ranges.back().end;
    }

    bool expiredAt(unsigned index) const {
      return index >= endNumber();
    bool expiredAt(MachineInstrIndex index) const {
      return index >= endIndex();
    }

    bool liveAt(unsigned index) const;
    bool liveAt(MachineInstrIndex index) const;

    // liveBeforeAndAt - Check if the interval is live at the index and the
    // index just before it. If index is liveAt, check if it starts a new live
    // range. If it does, then check if the previous live range ends at index-1.
    bool liveBeforeAndAt(unsigned index) const;
    bool liveBeforeAndAt(MachineInstrIndex index) const;

    /// getLiveRangeContaining - Return the live range that contains the
    /// specified index, or null if there is none.
    const LiveRange *getLiveRangeContaining(unsigned Idx) const {
    const LiveRange *getLiveRangeContaining(MachineInstrIndex Idx) const {
      const_iterator I = FindLiveRangeContaining(Idx);
      return I == end() ? 0 : &*I;
    }

    /// getLiveRangeContaining - Return the live range that contains the
    /// specified index, or null if there is none.
    LiveRange *getLiveRangeContaining(unsigned Idx) {
    LiveRange *getLiveRangeContaining(MachineInstrIndex Idx) {
      iterator I = FindLiveRangeContaining(Idx);
      return I == end() ? 0 : &*I;
    }

    /// FindLiveRangeContaining - Return an iterator to the live range that
    /// contains the specified index, or end() if there is none.
    const_iterator FindLiveRangeContaining(unsigned Idx) const;
    const_iterator FindLiveRangeContaining(MachineInstrIndex Idx) const;

    /// FindLiveRangeContaining - Return an iterator to the live range that
    /// contains the specified index, or end() if there is none.
    iterator FindLiveRangeContaining(unsigned Idx);
    iterator FindLiveRangeContaining(MachineInstrIndex Idx);

    /// findDefinedVNInfo - Find the VNInfo defined by the specified
    /// index (register interval).
    VNInfo *findDefinedVNInfoForRegInt(MachineInstrIndex Idx) const;

    /// findDefinedVNInfo - Find the VNInfo that's defined by the specified
    /// register (stack inteval only).
    VNInfo *findDefinedVNInfoForStackInt(unsigned Reg) const;

    /// findDefinedVNInfo - Find the VNInfo that's defined at the specified
    /// index (register interval) or defined by the specified register (stack
    /// inteval).
    VNInfo *findDefinedVNInfo(unsigned DefIdxOrReg) const;

    /// overlaps - Return true if the intersection of the two live intervals is
    /// not empty.
@@ -546,7 +784,7 @@ namespace llvm {

    /// overlaps - Return true if the live interval overlaps a range specified
    /// by [Start, End).
    bool overlaps(unsigned Start, unsigned End) const;
    bool overlaps(MachineInstrIndex Start, MachineInstrIndex End) const;

    /// overlapsFrom - Return true if the intersection of the two live intervals
    /// is not empty. The specified iterator is a hint that we can begin
@@ -570,11 +808,12 @@ namespace llvm {

    /// isInOneLiveRange - Return true if the range specified is entirely in the
    /// a single LiveRange of the live interval.
    bool isInOneLiveRange(unsigned Start, unsigned End);
    bool isInOneLiveRange(MachineInstrIndex Start, MachineInstrIndex End);

    /// removeRange - Remove the specified range from this interval. Note that
    /// the range must be a single LiveRange in its entirety.
    void removeRange(unsigned Start, unsigned End, bool RemoveDeadValNo = false);
    void removeRange(MachineInstrIndex Start, MachineInstrIndex End,
                     bool RemoveDeadValNo = false);

    void removeRange(LiveRange LR, bool RemoveDeadValNo = false) {
      removeRange(LR.start, LR.end, RemoveDeadValNo);
@@ -597,7 +836,7 @@ namespace llvm {
    void ComputeJoinedWeight(const LiveInterval &Other);

    bool operator<(const LiveInterval& other) const {
      return beginNumber() < other.beginNumber();
      return beginIndex() < other.beginIndex();
    }

    void print(raw_ostream &OS, const TargetRegisterInfo *TRI = 0) const;
@@ -606,8 +845,8 @@ namespace llvm {
  private:

    Ranges::iterator addRangeFrom(LiveRange LR, Ranges::iterator From);
    void extendIntervalEndTo(Ranges::iterator I, unsigned NewEnd);
    Ranges::iterator extendIntervalStartTo(Ranges::iterator I, unsigned NewStr);
    void extendIntervalEndTo(Ranges::iterator I, MachineInstrIndex NewEnd);
    Ranges::iterator extendIntervalStartTo(Ranges::iterator I, MachineInstrIndex NewStr);
    LiveInterval& operator=(const LiveInterval& rhs); // DO NOT IMPLEMENT

  };

@@ -40,13 +40,13 @@ namespace llvm {
  class TargetInstrInfo;
  class TargetRegisterClass;
  class VirtRegMap;
  typedef std::pair<unsigned, MachineBasicBlock*> IdxMBBPair;
  typedef std::pair<MachineInstrIndex, MachineBasicBlock*> IdxMBBPair;

  inline bool operator<(unsigned V, const IdxMBBPair &IM) {
  inline bool operator<(MachineInstrIndex V, const IdxMBBPair &IM) {
    return V < IM.first;
  }

  inline bool operator<(const IdxMBBPair &IM, unsigned V) {
  inline bool operator<(const IdxMBBPair &IM, MachineInstrIndex V) {
    return IM.first < V;
  }

@@ -65,13 +65,16 @@ namespace llvm {
    AliasAnalysis *aa_;
    LiveVariables* lv_;

    /// Special pool allocator for VNInfo's (LiveInterval val#).
    ///
    BumpPtrAllocator VNInfoAllocator;

    /// MBB2IdxMap - The indexes of the first and last instructions in the
    /// specified basic block.
    std::vector<std::pair<unsigned, unsigned> > MBB2IdxMap;
    std::vector<std::pair<MachineInstrIndex, MachineInstrIndex> > MBB2IdxMap;

    /// Idx2MBBMap - Sorted list of pairs of index of first instruction
    /// and MBB id.
@@ -80,7 +83,7 @@ namespace llvm {
    /// FunctionSize - The number of instructions present in the function
    uint64_t FunctionSize;

    typedef DenseMap<const MachineInstr*, unsigned> Mi2IndexMap;
    typedef DenseMap<const MachineInstr*, MachineInstrIndex> Mi2IndexMap;
    Mi2IndexMap mi2iMap_;

    typedef std::vector<MachineInstr*> Index2MiMap;
@@ -89,7 +92,7 @@ namespace llvm {
    typedef DenseMap<unsigned, LiveInterval*> Reg2IntervalMap;
    Reg2IntervalMap r2iMap_;

    DenseMap<MachineBasicBlock*, unsigned> terminatorGaps;
    DenseMap<MachineBasicBlock*, MachineInstrIndex> terminatorGaps;

    BitVector allocatableRegs_;

@@ -101,23 +104,40 @@ namespace llvm {
    static char ID; // Pass identification, replacement for typeid
    LiveIntervals() : MachineFunctionPass(&ID) {}

    static unsigned getBaseIndex(unsigned index) {
      return index - (index % InstrSlots::NUM);
    static MachineInstrIndex getBaseIndex(MachineInstrIndex index) {
      return MachineInstrIndex(index, MachineInstrIndex::LOAD);
    }
    static unsigned getBoundaryIndex(unsigned index) {
      return getBaseIndex(index + InstrSlots::NUM - 1);
    static MachineInstrIndex getBoundaryIndex(MachineInstrIndex index) {
      return MachineInstrIndex(index,
        (MachineInstrIndex::Slot)(MachineInstrIndex::NUM - 1));
    }
    static unsigned getLoadIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::LOAD;
    static MachineInstrIndex getLoadIndex(MachineInstrIndex index) {
      return MachineInstrIndex(index, MachineInstrIndex::LOAD);
    }
    static unsigned getUseIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::USE;
    static MachineInstrIndex getUseIndex(MachineInstrIndex index) {
      return MachineInstrIndex(index, MachineInstrIndex::USE);
    }
    static unsigned getDefIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::DEF;
    static MachineInstrIndex getDefIndex(MachineInstrIndex index) {
      return MachineInstrIndex(index, MachineInstrIndex::DEF);
    }
    static unsigned getStoreIndex(unsigned index) {
      return getBaseIndex(index) + InstrSlots::STORE;
    static MachineInstrIndex getStoreIndex(MachineInstrIndex index) {
      return MachineInstrIndex(index, MachineInstrIndex::STORE);
    }

    MachineInstrIndex getNextSlot(MachineInstrIndex m) const {
      return m.nextSlot();
    }

    MachineInstrIndex getNextIndex(MachineInstrIndex m) const {
      return m.nextIndex();
    }

    MachineInstrIndex getPrevSlot(MachineInstrIndex m) const {
      return m.prevSlot();
    }

    MachineInstrIndex getPrevIndex(MachineInstrIndex m) const {
      return m.prevIndex();
    }

    static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
@@ -150,20 +170,20 @@ namespace llvm {

    /// getMBBStartIdx - Return the base index of the first instruction in the
    /// specified MachineBasicBlock.
    unsigned getMBBStartIdx(MachineBasicBlock *MBB) const {
    MachineInstrIndex getMBBStartIdx(MachineBasicBlock *MBB) const {
      return getMBBStartIdx(MBB->getNumber());
    }
    unsigned getMBBStartIdx(unsigned MBBNo) const {
    MachineInstrIndex getMBBStartIdx(unsigned MBBNo) const {
      assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
      return MBB2IdxMap[MBBNo].first;
    }

    /// getMBBEndIdx - Return the store index of the last instruction in the
    /// specified MachineBasicBlock.
    unsigned getMBBEndIdx(MachineBasicBlock *MBB) const {
    MachineInstrIndex getMBBEndIdx(MachineBasicBlock *MBB) const {
      return getMBBEndIdx(MBB->getNumber());
    }
    unsigned getMBBEndIdx(unsigned MBBNo) const {
    MachineInstrIndex getMBBEndIdx(unsigned MBBNo) const {
      assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
      return MBB2IdxMap[MBBNo].second;
    }
@@ -184,7 +204,7 @@ namespace llvm {

    /// getMBBFromIndex - given an index in any instruction of an
    /// MBB return a pointer the MBB
    MachineBasicBlock* getMBBFromIndex(unsigned index) const {
    MachineBasicBlock* getMBBFromIndex(MachineInstrIndex index) const {
      std::vector<IdxMBBPair>::const_iterator I =
        std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), index);
      // Take the pair containing the index
@@ -192,14 +212,14 @@ namespace llvm {
        ((I != Idx2MBBMap.end() && I->first > index) ||
         (I == Idx2MBBMap.end() && Idx2MBBMap.size()>0)) ? (I-1): I;

      assert(J != Idx2MBBMap.end() && J->first < index+1 &&
      assert(J != Idx2MBBMap.end() && J->first <= index &&
             index <= getMBBEndIdx(J->second) &&
             "index does not correspond to an MBB");
      return J->second;
    }

    /// getInstructionIndex - returns the base index of instr
    unsigned getInstructionIndex(const MachineInstr* instr) const {
    MachineInstrIndex getInstructionIndex(const MachineInstr* instr) const {
      Mi2IndexMap::const_iterator it = mi2iMap_.find(instr);
      assert(it != mi2iMap_.end() && "Invalid instruction!");
      return it->second;
@@ -207,48 +227,50 @@ namespace llvm {

    /// getInstructionFromIndex - given an index in any slot of an
    /// instruction return a pointer the instruction
    MachineInstr* getInstructionFromIndex(unsigned index) const {
      index /= InstrSlots::NUM; // convert index to vector index
      assert(index < i2miMap_.size() &&
    MachineInstr* getInstructionFromIndex(MachineInstrIndex index) const {
      // convert index to vector index
      unsigned i = index.getVecIndex();
      assert(i < i2miMap_.size() &&
             "index does not correspond to an instruction");
      return i2miMap_[index];
      return i2miMap_[i];
    }

    /// hasGapBeforeInstr - Return true if the previous instruction slot,
    /// i.e. Index - InstrSlots::NUM, is not occupied.
    bool hasGapBeforeInstr(unsigned Index) {
      Index = getBaseIndex(Index - InstrSlots::NUM);
    bool hasGapBeforeInstr(MachineInstrIndex Index) {
      Index = getBaseIndex(Index.prevIndex());
      return getInstructionFromIndex(Index) == 0;
    }

    /// hasGapAfterInstr - Return true if the successive instruction slot,
    /// i.e. Index + InstrSlots::Num, is not occupied.
    bool hasGapAfterInstr(unsigned Index) {
      Index = getBaseIndex(Index + InstrSlots::NUM);
    bool hasGapAfterInstr(MachineInstrIndex Index) {
      Index = getBaseIndex(Index.nextIndex());
      return getInstructionFromIndex(Index) == 0;
    }

    /// findGapBeforeInstr - Find an empty instruction slot before the
    /// specified index. If "Furthest" is true, find one that's furthest
    /// away from the index (but before any index that's occupied).
    unsigned findGapBeforeInstr(unsigned Index, bool Furthest = false) {
      Index = getBaseIndex(Index - InstrSlots::NUM);
    MachineInstrIndex findGapBeforeInstr(MachineInstrIndex Index,
                                         bool Furthest = false) {
      Index = getBaseIndex(Index.prevIndex());
      if (getInstructionFromIndex(Index))
        return 0; // No gap!
        return MachineInstrIndex(); // No gap!
      if (!Furthest)
        return Index;
      unsigned PrevIndex = getBaseIndex(Index - InstrSlots::NUM);
      MachineInstrIndex PrevIndex = getBaseIndex(Index.prevIndex());
      while (getInstructionFromIndex(Index)) {
        Index = PrevIndex;
        PrevIndex = getBaseIndex(Index - InstrSlots::NUM);
        PrevIndex = getBaseIndex(Index.prevIndex());
      }
      return Index;
    }

    /// InsertMachineInstrInMaps - Insert the specified machine instruction
    /// into the instruction index map at the given index.
    void InsertMachineInstrInMaps(MachineInstr *MI, unsigned Index) {
      i2miMap_[Index / InstrSlots::NUM] = MI;
    void InsertMachineInstrInMaps(MachineInstr *MI, MachineInstrIndex Index) {
      i2miMap_[Index.index / MachineInstrIndex::NUM] = MI;
      Mi2IndexMap::iterator it = mi2iMap_.find(MI);
      assert(it == mi2iMap_.end() && "Already in map!");
      mi2iMap_[MI] = Index;
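InsertMachineInstrInMaps() above has to keep two structures consistent: the dense index-to-instruction vector, indexed by index / NUM, and the instruction-to-index map. A small self-contained model of that bookkeeping, with std::map standing in for DenseMap:

// ---- Illustrative sketch, not part of this commit -------------------------
#include <cassert>
#include <map>
#include <vector>

static const unsigned NUM = 4;                   // slots per instruction

struct Instr { int opcode; };

struct Maps {
  std::vector<Instr*> i2mi;                      // index/NUM -> instruction
  std::map<Instr*, unsigned> mi2i;               // instruction -> index

  void insert(Instr *mi, unsigned index) {
    if (i2mi.size() <= index / NUM)
      i2mi.resize(index / NUM + 1, 0);
    i2mi[index / NUM] = mi;
    assert(mi2i.find(mi) == mi2i.end() && "Already in map!");
    mi2i[mi] = index;                            // both directions stay in sync
  }
};

int main() {
  Maps maps;
  Instr add = { 1 };
  maps.insert(&add, 2 * NUM);                    // instruction number 2
  assert(maps.i2mi[2] == &add && maps.mi2i[&add] == 8);
  return 0;
}
// ----------------------------------------------------------------------------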
@@ -268,12 +290,12 @@ namespace llvm {
    /// findLiveInMBBs - Given a live range, if the value of the range
    /// is live in any MBB returns true as well as the list of basic blocks
    /// in which the value is live.
    bool findLiveInMBBs(unsigned Start, unsigned End,
    bool findLiveInMBBs(MachineInstrIndex Start, MachineInstrIndex End,
                        SmallVectorImpl<MachineBasicBlock*> &MBBs) const;

    /// findReachableMBBs - Return a list MBB that can be reached via any
    /// branch or fallthroughs. Return true if the list is not empty.
    bool findReachableMBBs(unsigned Start, unsigned End,
    bool findReachableMBBs(MachineInstrIndex Start, MachineInstrIndex End,
                           SmallVectorImpl<MachineBasicBlock*> &MBBs) const;

    // Interval creation
@@ -292,7 +314,7 @@ namespace llvm {
    /// addLiveRangeToEndOfBlock - Given a register and an instruction,
    /// adds a live range from that instruction to the end of its MBB.
    LiveRange addLiveRangeToEndOfBlock(unsigned reg,
                                       MachineInstr* startInst);
                                       MachineInstr* startInst);

    // Interval removal

@@ -315,7 +337,7 @@ namespace llvm {
      // MachineInstr -> index mappings
      Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI);
      if (mi2i != mi2iMap_.end()) {
        i2miMap_[mi2i->second/InstrSlots::NUM] = 0;
        i2miMap_[mi2i->second.index/InstrSlots::NUM] = 0;
        mi2iMap_.erase(mi2i);
      }
    }
@@ -326,10 +348,10 @@ namespace llvm {
      Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI);
      if (mi2i == mi2iMap_.end())
        return;
      i2miMap_[mi2i->second/InstrSlots::NUM] = NewMI;
      i2miMap_[mi2i->second.index/InstrSlots::NUM] = NewMI;
      Mi2IndexMap::iterator it = mi2iMap_.find(MI);
      assert(it != mi2iMap_.end() && "Invalid instruction!");
      unsigned Index = it->second;
      MachineInstrIndex Index = it->second;
      mi2iMap_.erase(it);
      mi2iMap_[NewMI] = Index;
    }
@@ -413,27 +435,29 @@ namespace llvm {
    /// (calls handlePhysicalRegisterDef and
    /// handleVirtualRegisterDef)
    void handleRegisterDef(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator MI, unsigned MIIdx,
                           MachineBasicBlock::iterator MI,
                           MachineInstrIndex MIIdx,
                           MachineOperand& MO, unsigned MOIdx);

    /// handleVirtualRegisterDef - update intervals for a virtual
    /// register def
    void handleVirtualRegisterDef(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned MIIdx, MachineOperand& MO,
                                  unsigned MOIdx, LiveInterval& interval);
                                  MachineInstrIndex MIIdx, MachineOperand& MO,
                                  unsigned MOIdx,
                                  LiveInterval& interval);

    /// handlePhysicalRegisterDef - update intervals for a physical register
    /// def.
    void handlePhysicalRegisterDef(MachineBasicBlock* mbb,
                                   MachineBasicBlock::iterator mi,
                                   unsigned MIIdx, MachineOperand& MO,
                                   MachineInstrIndex MIIdx, MachineOperand& MO,
                                   LiveInterval &interval,
                                   MachineInstr *CopyMI);

    /// handleLiveInRegister - Create interval for a livein register.
    void handleLiveInRegister(MachineBasicBlock* mbb,
                              unsigned MIIdx,
                              MachineInstrIndex MIIdx,
                              LiveInterval &interval, bool isAlias = false);

    /// getReMatImplicitUse - If the remat definition MI has one (for now, we
@@ -446,7 +470,7 @@ namespace llvm {
    /// which reaches the given instruction also reaches the specified use
    /// index.
    bool isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                            unsigned UseIdx) const;
                            MachineInstrIndex UseIdx) const;

    /// isReMaterializable - Returns true if the definition MI of the specified
    /// val# of the specified interval is re-materializable. Also returns true
@@ -461,9 +485,9 @@ namespace llvm {
    /// MI. If it is successul, MI is updated with the newly created MI and
    /// returns true.
    bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
                              MachineInstr *DefMI, unsigned InstrIdx,
                              MachineInstr *DefMI, MachineInstrIndex InstrIdx,
                              SmallVector<unsigned, 2> &Ops,
                              bool isSS, int Slot, unsigned Reg);
                              bool isSS, int FrameIndex, unsigned Reg);

    /// canFoldMemoryOperand - Return true if the specified load / store
    /// folding is possible.
@@ -474,7 +498,8 @@ namespace llvm {
    /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified
    /// VNInfo that's after the specified index but is within the basic block.
    bool anyKillInMBBAfterIdx(const LiveInterval &li, const VNInfo *VNI,
                              MachineBasicBlock *MBB, unsigned Idx) const;
                              MachineBasicBlock *MBB,
                              MachineInstrIndex Idx) const;

    /// hasAllocatableSuperReg - Return true if the specified physical register
    /// has any super register that's allocatable.
@@ -482,16 +507,17 @@ namespace llvm {

    /// SRInfo - Spill / restore info.
    struct SRInfo {
      int index;
      MachineInstrIndex index;
      unsigned vreg;
      bool canFold;
      SRInfo(int i, unsigned vr, bool f) : index(i), vreg(vr), canFold(f) {};
      SRInfo(MachineInstrIndex i, unsigned vr, bool f)
        : index(i), vreg(vr), canFold(f) {}
    };

    bool alsoFoldARestore(int Id, int index, unsigned vr,
    bool alsoFoldARestore(int Id, MachineInstrIndex index, unsigned vr,
                          BitVector &RestoreMBBs,
                          DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes);
    void eraseRestoreInfo(int Id, int index, unsigned vr,
    void eraseRestoreInfo(int Id, MachineInstrIndex index, unsigned vr,
                          BitVector &RestoreMBBs,
                          DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes);

@@ -510,8 +536,9 @@ namespace llvm {
    /// functions for addIntervalsForSpills to rewrite uses / defs for the given
    /// live range.
    bool rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
      bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
      MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
      bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
      MachineInstr *MI, MachineInstr *OrigDefMI, MachineInstr *DefMI,
      unsigned Slot, int LdSlot,
      bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
      VirtRegMap &vrm, const TargetRegisterClass* rc,
      SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
@@ -28,6 +28,11 @@
#include <algorithm>
using namespace llvm;

// Print a MachineInstrIndex to a raw_ostream.
void MachineInstrIndex::print(raw_ostream &os) const {
  os << (index & ~PHI_BIT);
}

// An example for liveAt():
//
// this = [1,4), liveAt(0) will return false. The instruction defining this
@@ -35,7 +40,7 @@ using namespace llvm;
// variable it represents. This is because slot 1 is used (def slot) and spans
// up to slot 3 (store slot).
//
bool LiveInterval::liveAt(unsigned I) const {
bool LiveInterval::liveAt(MachineInstrIndex I) const {
  Ranges::const_iterator r = std::upper_bound(ranges.begin(), ranges.end(), I);

  if (r == ranges.begin())
@@ -48,7 +53,7 @@ bool LiveInterval::liveAt(unsigned I) const {
// liveBeforeAndAt - Check if the interval is live at the index and the index
// just before it. If index is liveAt, check if it starts a new live range.
// If it does, then check if the previous live range ends at index-1.
bool LiveInterval::liveBeforeAndAt(unsigned I) const {
bool LiveInterval::liveBeforeAndAt(MachineInstrIndex I) const {
  Ranges::const_iterator r = std::upper_bound(ranges.begin(), ranges.end(), I);

  if (r == ranges.begin())
@@ -126,7 +131,7 @@ bool LiveInterval::overlapsFrom(const LiveInterval& other,

/// overlaps - Return true if the live interval overlaps a range specified
/// by [Start, End).
bool LiveInterval::overlaps(unsigned Start, unsigned End) const {
bool LiveInterval::overlaps(MachineInstrIndex Start, MachineInstrIndex End) const {
  assert(Start < End && "Invalid range");
  const_iterator I = begin();
  const_iterator E = end();
@@ -144,10 +149,10 @@ bool LiveInterval::overlaps(unsigned Start, unsigned End) const {
/// specified by I to end at the specified endpoint. To do this, we should
/// merge and eliminate all ranges that this will overlap with. The iterator is
/// not invalidated.
void LiveInterval::extendIntervalEndTo(Ranges::iterator I, unsigned NewEnd) {
void LiveInterval::extendIntervalEndTo(Ranges::iterator I, MachineInstrIndex NewEnd) {
  assert(I != ranges.end() && "Not a valid interval!");
  VNInfo *ValNo = I->valno;
  unsigned OldEnd = I->end;
  MachineInstrIndex OldEnd = I->end;

  // Search for the first interval that we can't merge with.
  Ranges::iterator MergeTo = next(I);
@@ -162,7 +167,7 @@ void LiveInterval::extendIntervalEndTo(Ranges::iterator I, unsigned NewEnd) {
  ranges.erase(next(I), MergeTo);

  // Update kill info.
  removeKills(ValNo, OldEnd, I->end-1);
  ValNo->removeKills(OldEnd, I->end.prevSlot());

  // If the newly formed range now touches the range after it and if they have
  // the same value number, merge the two ranges into one range.
@@ -178,7 +183,7 @@ void LiveInterval::extendIntervalEndTo(Ranges::iterator I, unsigned NewEnd) {
/// specified by I to start at the specified endpoint. To do this, we should
/// merge and eliminate all ranges that this will overlap with.
LiveInterval::Ranges::iterator
LiveInterval::extendIntervalStartTo(Ranges::iterator I, unsigned NewStart) {
LiveInterval::extendIntervalStartTo(Ranges::iterator I, MachineInstrIndex NewStart) {
  assert(I != ranges.end() && "Not a valid interval!");
  VNInfo *ValNo = I->valno;

@@ -211,7 +216,7 @@ LiveInterval::extendIntervalStartTo(Ranges::iterator I, unsigned NewStart) {

LiveInterval::iterator
LiveInterval::addRangeFrom(LiveRange LR, iterator From) {
  unsigned Start = LR.start, End = LR.end;
  MachineInstrIndex Start = LR.start, End = LR.end;
  iterator it = std::upper_bound(From, ranges.end(), Start);

  // If the inserted interval starts in the middle or right at the end of
@@ -245,7 +250,7 @@ LiveInterval::addRangeFrom(LiveRange LR, iterator From) {
      extendIntervalEndTo(it, End);
    else if (End < it->end)
      // Overlapping intervals, there might have been a kill here.
      removeKill(it->valno, End);
      it->valno->removeKill(End);
    return it;
  }
} else {
@@ -261,33 +266,32 @@ LiveInterval::addRangeFrom(LiveRange LR, iterator From) {
  return ranges.insert(it, LR);
}

/// isInOneLiveRange - Return true if the range specified is entirely in the
/// isInOneLiveRange - Return true if the range specified is entirely in
/// a single LiveRange of the live interval.
bool LiveInterval::isInOneLiveRange(unsigned Start, unsigned End) {
bool LiveInterval::isInOneLiveRange(MachineInstrIndex Start, MachineInstrIndex End) {
  Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
  if (I == ranges.begin())
    return false;
  --I;
  return I->contains(Start) && I->contains(End-1);
  return I->containsRange(Start, End);
}

/// removeRange - Remove the specified range from this interval. Note that
/// the range must be in a single LiveRange in its entirety.
void LiveInterval::removeRange(unsigned Start, unsigned End,
void LiveInterval::removeRange(MachineInstrIndex Start, MachineInstrIndex End,
                               bool RemoveDeadValNo) {
  // Find the LiveRange containing this span.
  Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
  assert(I != ranges.begin() && "Range is not in interval!");
  --I;
  assert(I->contains(Start) && I->contains(End-1) &&
         "Range is not entirely in interval!");
  assert(I->containsRange(Start, End) && "Range is not entirely in interval!");

  // If the span we are removing is at the start of the LiveRange, adjust it.
  VNInfo *ValNo = I->valno;
  if (I->start == Start) {
    if (I->end == End) {
      removeKills(I->valno, Start, End);
      ValNo->removeKills(Start, End);
      if (RemoveDeadValNo) {
        // Check if val# is dead.
        bool isDead = true;
@@ -321,13 +325,13 @@ void LiveInterval::removeRange(unsigned Start, unsigned End,
  // Otherwise if the span we are removing is at the end of the LiveRange,
  // adjust the other way.
  if (I->end == End) {
    removeKills(ValNo, Start, End);
    ValNo->removeKills(Start, End);
    I->end = Start;
    return;
  }

  // Otherwise, we are splitting the LiveRange into two pieces.
  unsigned OldEnd = I->end;
  MachineInstrIndex OldEnd = I->end;
  I->end = Start;  // Trim the old interval.

  // Insert the new one.
@@ -361,11 +365,12 @@ void LiveInterval::removeValNo(VNInfo *ValNo) {

/// scaleNumbering - Renumber VNI and ranges to provide gaps for new
/// instructions.

void LiveInterval::scaleNumbering(unsigned factor) {
  // Scale ranges.
  for (iterator RI = begin(), RE = end(); RI != RE; ++RI) {
    RI->start = InstrSlots::scale(RI->start, factor);
    RI->end = InstrSlots::scale(RI->end, factor);
    RI->start = RI->start.scale(factor);
    RI->end = RI->end.scale(factor);
  }

  // Scale VNI info.
@@ -373,20 +378,20 @@ void LiveInterval::scaleNumbering(unsigned factor) {
    VNInfo *vni = *VNI;

    if (vni->isDefAccurate())
      vni->def = InstrSlots::scale(vni->def, factor);
      vni->def = vni->def.scale(factor);

    for (unsigned i = 0; i < vni->kills.size(); ++i) {
      if (!vni->kills[i].isPHIKill)
        vni->kills[i].killIdx =
          InstrSlots::scale(vni->kills[i].killIdx, factor);
      if (!vni->kills[i].isPHIIndex())
        vni->kills[i] = vni->kills[i].scale(factor);
    }
  }
}

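scaleNumbering() makes room for new instructions by stretching every stored index: the instruction number is multiplied by the factor while the slot offset is preserved, matching the formula in the removed InstrSlots::scale(). A worked standalone check of that arithmetic:

// ---- Illustrative sketch, not part of this commit -------------------------
#include <cassert>

static const unsigned NUM = 4;                   // LOAD/USE/DEF/STORE

unsigned scaleIndex(unsigned idx, unsigned factor) {
  unsigned instr = idx / NUM, slot = idx % NUM;
  return instr * NUM * factor + slot;            // same formula as InstrSlots::scale
}

int main() {
  unsigned defOfInstr3 = 3 * NUM + 2;            // 14: instruction 3, DEF slot
  assert(scaleIndex(defOfInstr3, 2) == 26);      // 3*4*2 + 2: twice the spacing
  assert(scaleIndex(defOfInstr3, 2) % NUM == 2); // still a DEF slot
  return 0;
}
// ----------------------------------------------------------------------------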
/// getLiveRangeContaining - Return the live range that contains the
/// specified index, or null if there is none.
LiveInterval::const_iterator
LiveInterval::FindLiveRangeContaining(unsigned Idx) const {
LiveInterval::FindLiveRangeContaining(MachineInstrIndex Idx) const {
  const_iterator It = std::upper_bound(begin(), end(), Idx);
  if (It != ranges.begin()) {
    --It;
@@ -398,7 +403,7 @@ LiveInterval::FindLiveRangeContaining(unsigned Idx) const {
}

LiveInterval::iterator
LiveInterval::FindLiveRangeContaining(unsigned Idx) {
LiveInterval::FindLiveRangeContaining(MachineInstrIndex Idx) {
  iterator It = std::upper_bound(begin(), end(), Idx);
  if (It != begin()) {
    --It;
@@ -409,17 +414,27 @@ LiveInterval::FindLiveRangeContaining(unsigned Idx) {
  return end();
}

/// findDefinedVNInfo - Find the VNInfo that's defined at the specified index
/// (register interval) or defined by the specified register (stack inteval).
VNInfo *LiveInterval::findDefinedVNInfo(unsigned DefIdxOrReg) const {
  VNInfo *VNI = NULL;
/// findDefinedVNInfo - Find the VNInfo defined by the specified
/// index (register interval).
VNInfo *LiveInterval::findDefinedVNInfoForRegInt(MachineInstrIndex Idx) const {
  for (LiveInterval::const_vni_iterator i = vni_begin(), e = vni_end();
       i != e; ++i)
    if ((*i)->def == DefIdxOrReg) {
      VNI = *i;
      break;
    }
  return VNI;
       i != e; ++i) {
    if ((*i)->def == Idx)
      return *i;
  }

  return 0;
}

/// findDefinedVNInfo - Find the VNInfo defined by the specified
/// register (stack inteval).
VNInfo *LiveInterval::findDefinedVNInfoForStackInt(unsigned reg) const {
  for (LiveInterval::const_vni_iterator i = vni_begin(), e = vni_end();
       i != e; ++i) {
    if ((*i)->getReg() == reg)
      return *i;
  }
  return 0;
}

/// join - Join two live intervals (this, and other) together. This applies
@@ -546,7 +561,7 @@ void LiveInterval::MergeValueInAsValue(const LiveInterval &RHS,
  for (const_iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
    if (I->valno != RHSValNo)
      continue;
    unsigned Start = I->start, End = I->end;
    MachineInstrIndex Start = I->start, End = I->end;
    IP = std::upper_bound(IP, end(), Start);
    // If the start of this range overlaps with an existing liverange, trim it.
    if (IP != begin() && IP[-1].end > Start) {
@@ -622,20 +637,21 @@ void LiveInterval::MergeInClobberRanges(const LiveInterval &Clobbers,
    else if (UnusedValNo)
      ClobberValNo = UnusedValNo;
    else {
      UnusedValNo = ClobberValNo = getNextValue(0, 0, false, VNInfoAllocator);
      UnusedValNo = ClobberValNo =
        getNextValue(MachineInstrIndex(), 0, false, VNInfoAllocator);
      ValNoMaps.insert(std::make_pair(I->valno, ClobberValNo));
    }

    bool Done = false;
    unsigned Start = I->start, End = I->end;
    MachineInstrIndex Start = I->start, End = I->end;
    // If a clobber range starts before an existing range and ends after
    // it, the clobber range will need to be split into multiple ranges.
    // Loop until the entire clobber range is handled.
    while (!Done) {
      Done = true;
      IP = std::upper_bound(IP, end(), Start);
      unsigned SubRangeStart = Start;
      unsigned SubRangeEnd = End;
      MachineInstrIndex SubRangeStart = Start;
      MachineInstrIndex SubRangeEnd = End;

      // If the start of this range overlaps with an existing liverange, trim it.
      if (IP != begin() && IP[-1].end > SubRangeStart) {
@@ -671,11 +687,13 @@ void LiveInterval::MergeInClobberRanges(const LiveInterval &Clobbers,
    }
  }

void LiveInterval::MergeInClobberRange(unsigned Start, unsigned End,
void LiveInterval::MergeInClobberRange(MachineInstrIndex Start,
                                       MachineInstrIndex End,
                                       BumpPtrAllocator &VNInfoAllocator) {
  // Find a value # to use for the clobber ranges. If there is already a value#
  // for unknown values, use it.
  VNInfo *ClobberValNo = getNextValue(0, 0, false, VNInfoAllocator);
  VNInfo *ClobberValNo =
    getNextValue(MachineInstrIndex(), 0, false, VNInfoAllocator);

  iterator IP = begin();
  IP = std::upper_bound(IP, end(), Start);
@@ -788,7 +806,7 @@ void LiveInterval::Copy(const LiveInterval &RHS,
unsigned LiveInterval::getSize() const {
  unsigned Sum = 0;
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    Sum += I->end - I->start;
    Sum += I->start.distance(I->end);
  return Sum;
}

@@ -862,8 +880,8 @@ void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
      if (ee || vni->hasPHIKill()) {
        OS << "-(";
        for (unsigned j = 0; j != ee; ++j) {
          OS << vni->kills[j].killIdx;
          if (vni->kills[j].isPHIKill)
          OS << vni->kills[j];
          if (vni->kills[j].isPHIIndex())
            OS << "*";
          if (j != ee-1)
            OS << " ";

@@ -254,6 +254,7 @@ void LiveIntervals::processImplicitDefs() {
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
@@ -268,15 +269,16 @@ void LiveIntervals::computeNumbering() {

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinal.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
  MBB2IdxMap.resize(mf_->getNumBlockIDs(),
                    std::make_pair(MachineInstrIndex(),MachineInstrIndex()));

  unsigned MIIndex = 0;
  MachineInstrIndex MIIndex;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;
    MachineInstrIndex StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    MIIndex = MIIndex.nextIndex();
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
@@ -285,47 +287,51 @@ void LiveIntervals::computeNumbering() {
      if (I == MBB->getFirstTerminator()) {
        // Leave a gap for before terminators, this is where we will point
        // PHI kills.
        MachineInstrIndex tGap(true, MIIndex);
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
          terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex += InstrSlots::NUM;
        MIIndex = MIIndex.nextIndex();
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      inserted = true;
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      MIIndex = MIIndex.nextIndex();
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
      while (Slots--) {
        MIIndex = MIIndex.nextIndex();
        i2miMap_.push_back(0);
      }

    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap for before terminators, this is where we will point
      // PHI kills.
      MachineInstrIndex tGap(true, MIIndex);
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex += InstrSlots::NUM;
      MIIndex = MIIndex.nextIndex();
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex.prevSlot());
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

@ -340,9 +346,9 @@ void LiveIntervals::computeNumbering() {
|
||||
// number, or our best guess at what it _should_ correspond to if the
|
||||
// original instruction has been erased. This is either the following
|
||||
// instruction or its predecessor.
|
||||
unsigned index = LI->start / InstrSlots::NUM;
|
||||
unsigned offset = LI->start % InstrSlots::NUM;
|
||||
if (offset == InstrSlots::LOAD) {
|
||||
unsigned index = LI->start.getVecIndex();
|
||||
MachineInstrIndex::Slot offset = LI->start.getSlot();
|
||||
if (LI->start.isLoad()) {
|
||||
std::vector<IdxMBBPair>::const_iterator I =
|
||||
std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
|
||||
// Take the pair containing the index
|
||||
@ -351,29 +357,34 @@ void LiveIntervals::computeNumbering() {
|
||||
|
||||
LI->start = getMBBStartIdx(J->second);
|
||||
} else {
|
||||
LI->start = mi2iMap_[OldI2MI[index]] + offset;
|
||||
LI->start = MachineInstrIndex(
|
||||
MachineInstrIndex(mi2iMap_[OldI2MI[index]]),
|
||||
(MachineInstrIndex::Slot)offset);
|
||||
}
|
||||
|
||||
// Remap the ending index in the same way that we remapped the start,
|
||||
// except for the final step where we always map to the immediately
|
||||
// following instruction.
|
||||
index = (LI->end - 1) / InstrSlots::NUM;
|
||||
offset = LI->end % InstrSlots::NUM;
|
||||
if (offset == InstrSlots::LOAD) {
|
||||
index = (LI->end.prevSlot()).getVecIndex();
|
||||
offset = LI->end.getSlot();
|
||||
if (LI->end.isLoad()) {
|
||||
// VReg dies at end of block.
|
||||
std::vector<IdxMBBPair>::const_iterator I =
|
||||
std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
|
||||
--I;
|
||||
|
||||
LI->end = getMBBEndIdx(I->second) + 1;
|
||||
LI->end = getMBBEndIdx(I->second).nextSlot();
|
||||
} else {
|
||||
unsigned idx = index;
|
||||
while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
|
||||
|
||||
if (index != OldI2MI.size())
|
||||
LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
|
||||
LI->end =
|
||||
MachineInstrIndex(mi2iMap_[OldI2MI[index]],
|
||||
(idx == index ? offset : MachineInstrIndex::LOAD));
|
||||
else
|
||||
LI->end = InstrSlots::NUM * i2miMap_.size();
|
||||
LI->end =
|
||||
MachineInstrIndex(MachineInstrIndex::NUM * i2miMap_.size());
|
||||
}
|
||||
}
|
||||
|
||||
@ -385,9 +396,9 @@ void LiveIntervals::computeNumbering() {
|
||||
// start indices above. VN's with special sentinel defs
|
||||
// don't need to be remapped.
|
||||
if (vni->isDefAccurate() && !vni->isUnused()) {
|
||||
unsigned index = vni->def / InstrSlots::NUM;
|
||||
unsigned offset = vni->def % InstrSlots::NUM;
|
||||
if (offset == InstrSlots::LOAD) {
|
||||
unsigned index = vni->def.getVecIndex();
|
||||
MachineInstrIndex::Slot offset = vni->def.getSlot();
|
||||
if (vni->def.isLoad()) {
|
||||
std::vector<IdxMBBPair>::const_iterator I =
|
||||
std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
|
||||
// Take the pair containing the index
|
||||
@ -396,19 +407,17 @@ void LiveIntervals::computeNumbering() {
|
||||
|
||||
vni->def = getMBBStartIdx(J->second);
|
||||
} else {
|
||||
vni->def = mi2iMap_[OldI2MI[index]] + offset;
|
||||
vni->def = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
|
||||
}
|
||||
}
|
||||
|
||||
// Remap the VNInfo kill indices, which works the same as
|
||||
// the end indices above.
|
||||
for (size_t i = 0; i < vni->kills.size(); ++i) {
|
||||
unsigned killIdx = vni->kills[i].killIdx;
|
||||
unsigned index = vni->kills[i].prevSlot().getVecIndex();
|
||||
MachineInstrIndex::Slot offset = vni->kills[i].getSlot();
|
||||
|
||||
unsigned index = (killIdx - 1) / InstrSlots::NUM;
|
||||
unsigned offset = killIdx % InstrSlots::NUM;
|
||||
|
||||
if (offset == InstrSlots::LOAD) {
|
||||
if (vni->kills[i].isLoad()) {
|
||||
assert("Value killed at a load slot.");
|
||||
/*std::vector<IdxMBBPair>::const_iterator I =
|
||||
std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
|
||||
@ -416,15 +425,15 @@ void LiveIntervals::computeNumbering() {
|
||||
|
||||
vni->kills[i] = getMBBEndIdx(I->second);*/
|
||||
} else {
|
||||
if (vni->kills[i].isPHIKill) {
|
||||
if (vni->kills[i].isPHIIndex()) {
|
||||
std::vector<IdxMBBPair>::const_iterator I =
|
||||
std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
|
||||
std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
|
||||
--I;
|
||||
vni->kills[i].killIdx = terminatorGaps[I->second];
|
||||
vni->kills[i] = terminatorGaps[I->second];
|
||||
} else {
|
||||
assert(OldI2MI[index] != 0 &&
|
||||
"Kill refers to instruction not present in index maps.");
|
||||
vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
|
||||
vni->kills[i] = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
|
||||
}
|
||||
|
||||
/*
|
||||
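In the kill-remapping hunk just above, each kill entry stops being a small struct (killIdx plus an isPHIKill flag) and becomes a plain MachineInstrIndex queried through isPHIIndex(), with terminator gaps constructed as MachineInstrIndex tGap(true, MIIndex). The sketch below shows one way such a flag can be folded into the index value itself; the bit position and helper names are illustrative assumptions, not the actual encoding.

#include <cassert>

// Illustrative only: an index that carries its own "kill is a PHI" marker.
struct KillIndexSketch {
  unsigned Raw;
  enum : unsigned { PhiFlag = 1u << 31 };  // assumed reserved bit

  KillIndexSketch(unsigned Idx, bool IsPHI = false)
      : Raw(IsPHI ? (Idx | PhiFlag) : Idx) {}

  bool isPHIIndex() const { return (Raw & PhiFlag) != 0; }
  unsigned value() const { return Raw & ~PhiFlag; }  // comparisons ignore the flag
};

int main() {
  KillIndexSketch Plain(120);
  KillIndexSketch Phi(120, /*IsPHI=*/true);
  assert(!Plain.isPHIIndex() && Phi.isPHIIndex());
  assert(Plain.value() == Phi.value());
  return 0;
}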
@@ -454,18 +463,18 @@ void LiveIntervals::scaleNumbering(int factor) {
Idx2MBBMap.clear();
for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
MBB != MBBE; ++MBB) {
std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
std::pair<MachineInstrIndex, MachineInstrIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
mbbIndices.first = mbbIndices.first.scale(factor);
mbbIndices.second = mbbIndices.second.scale(factor);
Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
}
std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

// Scale terminator gaps.
for (DenseMap<MachineBasicBlock*, unsigned>::iterator
for (DenseMap<MachineBasicBlock*, MachineInstrIndex>::iterator
TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
TGI != TGE; ++TGI) {
terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
terminatorGaps[TGI->first] = TGI->second.scale(factor);
}

// Scale the intervals.
@@ -475,19 +484,20 @@ void LiveIntervals::scaleNumbering(int factor) {

// Scale MachineInstrs.
Mi2IndexMap oldmi2iMap = mi2iMap_;
unsigned highestSlot = 0;
MachineInstrIndex highestSlot;
for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
MI != ME; ++MI) {
unsigned newSlot = InstrSlots::scale(MI->second, factor);
MachineInstrIndex newSlot = MI->second.scale(factor);
mi2iMap_[MI->first] = newSlot;
highestSlot = std::max(highestSlot, newSlot);
}

unsigned highestVIndex = highestSlot.getVecIndex();
i2miMap_.clear();
i2miMap_.resize(highestSlot + 1);
i2miMap_.resize(highestVIndex + 1);
for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
MI != ME; ++MI) {
i2miMap_[MI->second] = const_cast<MachineInstr *>(MI->first);
i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
}

}
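The scaleNumbering and spill bookkeeping hunks in this commit also replace magic sentinels, for example ~0U for uninitialized MBB index pairs, 0 for "no gap", and -1 for a cleared restore index, with a default-constructed MachineInstrIndex compared via operator!=. A minimal sketch of that calling pattern follows; findGapSketch is a hypothetical stand-in, not the LiveIntervals API.

// Illustrative sentinel pattern only.
struct IdxSketch {
  unsigned Value;
  IdxSketch() : Value(0) {}                       // default: the reserved index
  explicit IdxSketch(unsigned V) : Value(V) {}
  bool operator==(IdxSketch O) const { return Value == O.Value; }
  bool operator!=(IdxSketch O) const { return Value != O.Value; }
};

// Hypothetical helper: yields the default index where old code returned 0 or -1.
IdxSketch findGapSketch(bool HasGap) { return HasGap ? IdxSketch(8) : IdxSketch(); }

bool tryUseGap(bool HasGap) {
  IdxSketch Gap = findGapSketch(HasGap);
  return Gap != IdxSketch();                      // old code: if (Gap) ...
}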
@@ -541,12 +551,12 @@ bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
VirtRegMap &vrm, unsigned reg) {
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
for (unsigned index = getBaseIndex(I->start),
end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
index += InstrSlots::NUM) {
for (MachineInstrIndex index = getBaseIndex(I->start),
end = getBaseIndex(I->end.prevSlot()).nextIndex(); index != end;
index = index.nextIndex()) {
// skip deleted instructions
while (index != end && !getInstructionFromIndex(index))
index += InstrSlots::NUM;
index = index.nextIndex();
if (index == end) break;

MachineInstr *MI = getInstructionFromIndex(index);
@@ -582,16 +592,16 @@ bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
for (unsigned index = getBaseIndex(I->start),
end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
index += InstrSlots::NUM) {
for (MachineInstrIndex index = getBaseIndex(I->start),
end = getBaseIndex(I->end.prevSlot()).nextIndex(); index != end;
index = index.nextIndex()) {
// Skip deleted instructions.
MachineInstr *MI = 0;
while (index != end) {
MI = getInstructionFromIndex(index);
if (MI)
break;
index += InstrSlots::NUM;
index = index.nextIndex();
}
if (index == end) break;

@@ -625,7 +635,8 @@ void LiveIntervals::printRegName(unsigned reg) const {

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
MachineBasicBlock::iterator mi,
unsigned MIIdx, MachineOperand& MO,
MachineInstrIndex MIIdx,
MachineOperand& MO,
unsigned MOIdx,
LiveInterval &interval) {
DEBUG({
@@ -640,7 +651,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
if (interval.empty()) {
// Get the Idx of the defining instructions.
unsigned defIndex = getDefIndex(MIIdx);
MachineInstrIndex defIndex = getDefIndex(MIIdx);
// Earlyclobbers move back one.
if (MO.isEarlyClobber())
defIndex = getUseIndex(MIIdx);
@@ -663,11 +674,11 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// will be a single kill, in MBB, which comes after the definition.
if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
// FIXME: what about dead vars?
unsigned killIdx;
MachineInstrIndex killIdx;
if (vi.Kills[0] != mi)
killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
killIdx = getUseIndex(getInstructionIndex(vi.Kills[0])).nextSlot();
else
killIdx = defIndex+1;
killIdx = defIndex.nextSlot();

// If the kill happens after the definition, we have an intra-block
// live range.
@@ -677,7 +688,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
LiveRange LR(defIndex, killIdx, ValNo);
interval.addRange(LR);
DEBUG(errs() << " +" << LR << "\n");
interval.addKill(ValNo, killIdx, false);
ValNo->addKill(killIdx);
return;
}
}
@@ -686,7 +697,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// of the defining block, potentially live across some blocks, then is
// live into some number of blocks, but gets killed. Start by adding a
// range that goes from this definition to the end of the defining block.
LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
LiveRange NewLR(defIndex, getMBBEndIdx(mbb).nextSlot(), ValNo);
DEBUG(errs() << " +" << NewLR);
interval.addRange(NewLR);

@@ -696,7 +707,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
E = vi.AliveBlocks.end(); I != E; ++I) {
LiveRange LR(getMBBStartIdx(*I),
getMBBEndIdx(*I)+1, // MBB ends at -1.
getMBBEndIdx(*I).nextSlot(), // MBB ends at -1.
ValNo);
interval.addRange(LR);
DEBUG(errs() << " +" << LR);
@@ -706,11 +717,11 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// block to the 'use' slot of the killing instruction.
for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
MachineInstr *Kill = vi.Kills[i];
unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
MachineInstrIndex killIdx = getUseIndex(getInstructionIndex(Kill)).nextSlot();
LiveRange LR(getMBBStartIdx(Kill->getParent()),
killIdx, ValNo);
interval.addRange(LR);
interval.addKill(ValNo, killIdx, false);
ValNo->addKill(killIdx);
DEBUG(errs() << " +" << LR);
}

@@ -726,12 +737,12 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// need to take the LiveRegion that defines this register and split it
// into two values.
assert(interval.containsOneValue());
unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
unsigned RedefIndex = getDefIndex(MIIdx);
MachineInstrIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
MachineInstrIndex RedefIndex = getDefIndex(MIIdx);
if (MO.isEarlyClobber())
RedefIndex = getUseIndex(MIIdx);

const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex.prevSlot());
VNInfo *OldValNo = OldLR->valno;

// Delete the initial value, which should be short and continuous,
@@ -759,12 +770,12 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
LiveRange LR(DefIndex, RedefIndex, ValNo);
DEBUG(errs() << " replace range with " << LR);
interval.addRange(LR);
interval.addKill(ValNo, RedefIndex, false);
ValNo->addKill(RedefIndex);

// If this redefinition is dead, we need to add a dummy unit live
// range covering the def slot.
if (MO.isDead())
interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
interval.addRange(LiveRange(RedefIndex, RedefIndex.nextSlot(), OldValNo));

DEBUG({
errs() << " RESULT: ";
@@ -781,8 +792,8 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// Remove the old range that we now know has an incorrect number.
VNInfo *VNI = interval.getValNumInfo(0);
MachineInstr *Killer = vi.Kills[0];
unsigned Start = getMBBStartIdx(Killer->getParent());
unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
MachineInstrIndex Start = getMBBStartIdx(Killer->getParent());
MachineInstrIndex End = getUseIndex(getInstructionIndex(Killer)).nextSlot();
DEBUG({
errs() << " Removing [" << Start << "," << End << "] from: ";
interval.print(errs(), tri_);
@@ -791,8 +802,8 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
interval.removeRange(Start, End);
assert(interval.ranges.size() == 1 &&
"newly discovered PHI interval has >1 ranges.");
MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
interval.addKill(VNI, terminatorGaps[killMBB], true);
MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
VNI->addKill(terminatorGaps[killMBB]);
VNI->setHasPHIKill(true);
DEBUG({
errs() << " RESULT: ";
@@ -802,11 +813,12 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// Replace the interval with one of a NEW value number. Note that this
// value number isn't actually defined by an instruction, weird huh? :)
LiveRange LR(Start, End,
interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
interval.getNextValue(MachineInstrIndex(mbb->getNumber()),
0, false, VNInfoAllocator));
LR.valno->setIsPHIDef(true);
DEBUG(errs() << " replace range with " << LR);
interval.addRange(LR);
interval.addKill(LR.valno, End, false);
LR.valno->addKill(End);
DEBUG({
errs() << " RESULT: ";
interval.print(errs(), tri_);
@@ -816,7 +828,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// In the case of PHI elimination, each variable definition is only
// live until the end of the block. We've already taken care of the
// rest of the live range.
unsigned defIndex = getDefIndex(MIIdx);
MachineInstrIndex defIndex = getDefIndex(MIIdx);
if (MO.isEarlyClobber())
defIndex = getUseIndex(MIIdx);

@@ -830,10 +842,10 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
CopyMI = mi;
ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

unsigned killIndex = getMBBEndIdx(mbb) + 1;
MachineInstrIndex killIndex = getMBBEndIdx(mbb).nextSlot();
LiveRange LR(defIndex, killIndex, ValNo);
interval.addRange(LR);
interval.addKill(ValNo, terminatorGaps[mbb], true);
ValNo->addKill(terminatorGaps[mbb]);
ValNo->setHasPHIKill(true);
DEBUG(errs() << " +" << LR);
}
@@ -844,7 +856,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
MachineBasicBlock::iterator mi,
unsigned MIIdx,
MachineInstrIndex MIIdx,
MachineOperand& MO,
LiveInterval &interval,
MachineInstr *CopyMI) {
@@ -855,33 +867,33 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
printRegName(interval.reg);
});

unsigned baseIndex = MIIdx;
unsigned start = getDefIndex(baseIndex);
MachineInstrIndex baseIndex = MIIdx;
MachineInstrIndex start = getDefIndex(baseIndex);
// Earlyclobbers move back one.
if (MO.isEarlyClobber())
start = getUseIndex(MIIdx);
unsigned end = start;
MachineInstrIndex end = start;

// If it is not used after definition, it is considered dead at
// the instruction defining it. Hence its interval is:
// [defSlot(def), defSlot(def)+1)
if (MO.isDead()) {
DEBUG(errs() << " dead");
end = start + 1;
end = start.nextSlot();
goto exit;
}

// If it is not dead on definition, it must be killed by a
// subsequent instruction. Hence its interval is:
// [defSlot(def), useSlot(kill)+1)
baseIndex += InstrSlots::NUM;
baseIndex = baseIndex.nextIndex();
while (++mi != MBB->end()) {
while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
while (baseIndex.getVecIndex() < i2miMap_.size() &&
getInstructionFromIndex(baseIndex) == 0)
baseIndex += InstrSlots::NUM;
baseIndex = baseIndex.nextIndex();
if (mi->killsRegister(interval.reg, tri_)) {
DEBUG(errs() << " killed");
end = getUseIndex(baseIndex) + 1;
end = getUseIndex(baseIndex).nextSlot();
goto exit;
} else {
int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
@@ -897,20 +909,20 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
// it. Hence its interval is:
// [defSlot(def), defSlot(def)+1)
DEBUG(errs() << " dead");
end = start + 1;
end = start.nextSlot();
}
goto exit;
}
}

baseIndex += InstrSlots::NUM;
baseIndex = baseIndex.nextIndex();
}

// The only case we should have a dead physreg here without a killing or
// instruction where we know it's dead is if it is live-in to the function
// and never used. Another possible case is the implicit use of the
// physical register has been deleted by two-address pass.
end = start + 1;
end = start.nextSlot();

exit:
assert(start < end && "did not find end of interval?");
@@ -924,13 +936,13 @@ exit:
ValNo->setHasRedefByEC(true);
LiveRange LR(start, end, ValNo);
interval.addRange(LR);
interval.addKill(LR.valno, end, false);
LR.valno->addKill(end);
DEBUG(errs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
MachineBasicBlock::iterator MI,
unsigned MIIdx,
MachineInstrIndex MIIdx,
MachineOperand& MO,
unsigned MOIdx) {
if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
@@ -957,7 +969,7 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
unsigned MIIdx,
MachineInstrIndex MIIdx,
LiveInterval &interval, bool isAlias) {
DEBUG({
errs() << "\t\tlivein register: ";
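The interval endpoints built above, and in the spill-rewriting hunks further down, always stay within the slots of a single instruction: the comments describe a dead def as [defSlot(def), defSlot(def)+1), and reloads run from getLoadIndex(i) to getUseIndex(i).nextSlot(). The sketch below assumes the load/use/def/store slot ordering those endpoints imply; the free functions are stand-ins for the real accessors, not the actual API.

#include <cassert>

enum SlotKind { LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4 };  // assumed order

unsigned baseIndex(unsigned Idx)  { return Idx - Idx % NUM; }
unsigned loadIndex(unsigned Idx)  { return baseIndex(Idx) + LOAD; }
unsigned useIndex(unsigned Idx)   { return baseIndex(Idx) + USE; }
unsigned defIndex(unsigned Idx)   { return baseIndex(Idx) + DEF; }
unsigned storeIndex(unsigned Idx) { return baseIndex(Idx) + STORE; }

int main() {
  unsigned I = 7 * NUM + DEF;            // some slot of instruction #7
  // Slots of one instruction are ordered load < use < def < store.
  assert(loadIndex(I) < useIndex(I) && useIndex(I) < defIndex(I) &&
         defIndex(I) < storeIndex(I));
  // A dead def covers [def, def + 1), which ends exactly at the store slot.
  assert(defIndex(I) + 1 == storeIndex(I));
  return 0;
}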
@@ -967,18 +979,18 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
// Look for kills, if it reaches a def before it's killed, then it shouldn't
// be considered a livein.
MachineBasicBlock::iterator mi = MBB->begin();
unsigned baseIndex = MIIdx;
unsigned start = baseIndex;
while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
MachineInstrIndex baseIndex = MIIdx;
MachineInstrIndex start = baseIndex;
while (baseIndex.getVecIndex() < i2miMap_.size() &&
getInstructionFromIndex(baseIndex) == 0)
baseIndex += InstrSlots::NUM;
unsigned end = baseIndex;
baseIndex = baseIndex.nextIndex();
MachineInstrIndex end = baseIndex;
bool SeenDefUse = false;

while (mi != MBB->end()) {
if (mi->killsRegister(interval.reg, tri_)) {
DEBUG(errs() << " killed");
end = getUseIndex(baseIndex) + 1;
end = getUseIndex(baseIndex).nextSlot();
SeenDefUse = true;
break;
} else if (mi->modifiesRegister(interval.reg, tri_)) {
@@ -987,17 +999,17 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
// it. Hence its interval is:
// [defSlot(def), defSlot(def)+1)
DEBUG(errs() << " dead");
end = getDefIndex(start) + 1;
end = getDefIndex(start).nextSlot();
SeenDefUse = true;
break;
}

baseIndex += InstrSlots::NUM;
baseIndex = baseIndex.nextIndex();
++mi;
if (mi != MBB->end()) {
while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
while (baseIndex.getVecIndex() < i2miMap_.size() &&
getInstructionFromIndex(baseIndex) == 0)
baseIndex += InstrSlots::NUM;
baseIndex = baseIndex.nextIndex();
}
}

@@ -1005,7 +1017,7 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
if (!SeenDefUse) {
if (isAlias) {
DEBUG(errs() << " dead");
end = getDefIndex(MIIdx) + 1;
end = getDefIndex(MIIdx).nextSlot();
} else {
DEBUG(errs() << " live through");
end = baseIndex;
@@ -1013,12 +1025,13 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
}

VNInfo *vni =
interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
0, false, VNInfoAllocator);
vni->setIsPHIDef(true);
LiveRange LR(start, end, vni);

interval.addRange(LR);
interval.addKill(LR.valno, end, false);
LR.valno->addKill(end);
DEBUG(errs() << " +" << LR << '\n');
}

@@ -1036,7 +1049,7 @@ void LiveIntervals::computeIntervals() {
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = MBBI;
// Track the index of the current machine instr.
unsigned MIIndex = getMBBStartIdx(MBB);
MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
@@ -1053,9 +1066,9 @@ void LiveIntervals::computeIntervals() {
}

// Skip over empty initial indices.
while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
while (MIIndex.getVecIndex() < i2miMap_.size() &&
getInstructionFromIndex(MIIndex) == 0)
MIIndex += InstrSlots::NUM;
MIIndex = MIIndex.nextIndex();

for (; MI != miEnd; ++MI) {
DEBUG(errs() << MIIndex << "\t" << *MI);
@@ -1077,12 +1090,14 @@ void LiveIntervals::computeIntervals() {
unsigned Slots = MI->getDesc().getNumDefs();
if (Slots == 0)
Slots = 1;
MIIndex += InstrSlots::NUM * Slots;

while (Slots--)
MIIndex = MIIndex.nextIndex();

// Skip over empty indices.
while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
while (MIIndex.getVecIndex() < i2miMap_.size() &&
getInstructionFromIndex(MIIndex) == 0)
MIIndex += InstrSlots::NUM;
MIIndex = MIIndex.nextIndex();
}
}

@@ -1095,7 +1110,8 @@ void LiveIntervals::computeIntervals() {
}
}

bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
bool LiveIntervals::findLiveInMBBs(
MachineInstrIndex Start, MachineInstrIndex End,
SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
std::vector<IdxMBBPair>::const_iterator I =
std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
@@ -1111,7 +1127,8 @@ bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
return ResVal;
}

bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
bool LiveIntervals::findReachableMBBs(
MachineInstrIndex Start, MachineInstrIndex End,
SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
std::vector<IdxMBBPair>::const_iterator I =
std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
@@ -1203,8 +1220,8 @@ unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
unsigned UseIdx) const {
unsigned Index = getInstructionIndex(MI);
MachineInstrIndex UseIdx) const {
MachineInstrIndex Index = getInstructionIndex(MI);
VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
return UI != li.end() && UI->valno == ValNo;
@@ -1314,7 +1331,7 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
re = mri_->use_end(); ri != re; ++ri) {
MachineInstr *UseMI = &*ri;
unsigned UseIdx = getInstructionIndex(UseMI);
MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
continue;
if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
@@ -1399,7 +1416,7 @@ static bool FilterFoldedOps(MachineInstr *MI,
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
VirtRegMap &vrm, MachineInstr *DefMI,
unsigned InstrIdx,
MachineInstrIndex InstrIdx,
SmallVector<unsigned, 2> &Ops,
bool isSS, int Slot, unsigned Reg) {
// If it is an implicit def instruction, just delete it.
@@ -1438,7 +1455,7 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
vrm.transferRestorePts(MI, fmi);
vrm.transferEmergencySpills(MI, fmi);
mi2iMap_.erase(MI);
i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
i2miMap_[InstrIdx.getVecIndex()] = fmi;
mi2iMap_[fmi] = InstrIdx;
MI = MBB.insert(MBB.erase(MI), fmi);
++numFolds;
@@ -1511,7 +1528,8 @@ void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
MachineInstr *MI,
MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
@@ -1687,13 +1705,14 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,

if (HasUse) {
if (CreatedNewVReg) {
LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
nI.getNextValue(0, 0, false, VNInfoAllocator));
LiveRange LR(getLoadIndex(index), getUseIndex(index).nextSlot(),
nI.getNextValue(MachineInstrIndex(), 0, false,
VNInfoAllocator));
DEBUG(errs() << " +" << LR);
nI.addRange(LR);
} else {
// Extend the split live interval to this def / use.
unsigned End = getUseIndex(index)+1;
MachineInstrIndex End = getUseIndex(index).nextSlot();
LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
nI.getValNumInfo(nI.getNumValNums()-1));
DEBUG(errs() << " +" << LR);
@@ -1702,7 +1721,8 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
}
if (HasDef) {
LiveRange LR(getDefIndex(index), getStoreIndex(index),
nI.getNextValue(0, 0, false, VNInfoAllocator));
nI.getNextValue(MachineInstrIndex(), 0, false,
VNInfoAllocator));
DEBUG(errs() << " +" << LR);
nI.addRange(LR);
}
@@ -1717,13 +1737,14 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
const VNInfo *VNI,
MachineBasicBlock *MBB, unsigned Idx) const {
unsigned End = getMBBEndIdx(MBB);
MachineBasicBlock *MBB,
MachineInstrIndex Idx) const {
MachineInstrIndex End = getMBBEndIdx(MBB);
for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
if (VNI->kills[j].isPHIKill)
if (VNI->kills[j].isPHIIndex())
continue;

unsigned KillIdx = VNI->kills[j].killIdx;
MachineInstrIndex KillIdx = VNI->kills[j];
if (KillIdx > Idx && KillIdx < End)
return true;
}
@@ -1734,11 +1755,11 @@ bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
/// during spilling.
namespace {
struct RewriteInfo {
unsigned Index;
MachineInstrIndex Index;
MachineInstr *MI;
bool HasUse;
bool HasDef;
RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
: Index(i), MI(mi), HasUse(u), HasDef(d) {}
};

@@ -1767,8 +1788,8 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
std::vector<LiveInterval*> &NewLIs) {
bool AllCanFold = true;
unsigned NewVReg = 0;
unsigned start = getBaseIndex(I->start);
unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
MachineInstrIndex start = getBaseIndex(I->start);
MachineInstrIndex end = getBaseIndex(I->end.prevSlot()).nextIndex();

// First collect all the def / use in this live range that will be rewritten.
// Make sure they are sorted according to instruction index.
@@ -1779,7 +1800,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
MachineOperand &O = ri.getOperand();
++ri;
assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
unsigned index = getInstructionIndex(MI);
MachineInstrIndex index = getInstructionIndex(MI);
if (index < start || index >= end)
continue;

@@ -1803,7 +1824,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
RewriteInfo &rwi = RewriteMIs[i];
++i;
unsigned index = rwi.Index;
MachineInstrIndex index = rwi.Index;
bool MIHasUse = rwi.HasUse;
bool MIHasDef = rwi.HasDef;
MachineInstr *MI = rwi.MI;
@@ -1889,7 +1910,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
else {
// If this is a two-address code, then this index starts a new VNInfo.
const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
if (VNI)
HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
}
@@ -1902,7 +1923,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
SpillIdxes.insert(std::make_pair(MBBId, S));
} else if (SII->second.back().vreg != NewVReg) {
SII->second.push_back(SRInfo(index, NewVReg, true));
} else if ((int)index > SII->second.back().index) {
} else if (index > SII->second.back().index) {
// If there is an earlier def and this is a two-address
// instruction, then it's not possible to fold the store (which
// would also fold the load).
@@ -1913,7 +1934,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
SpillMBBs.set(MBBId);
} else if (SII != SpillIdxes.end() &&
SII->second.back().vreg == NewVReg &&
(int)index > SII->second.back().index) {
index > SII->second.back().index) {
// There is an earlier def that's not killed (must be two-address).
// The spill is no longer needed.
SII->second.pop_back();
@@ -1930,7 +1951,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
SpillIdxes.find(MBBId);
if (SII != SpillIdxes.end() &&
SII->second.back().vreg == NewVReg &&
(int)index > SII->second.back().index)
index > SII->second.back().index)
// Use(s) following the last def, it's not safe to fold the spill.
SII->second.back().canFold = false;
DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
@@ -1964,8 +1985,8 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
}
}

bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
BitVector &RestoreMBBs,
bool LiveIntervals::alsoFoldARestore(int Id, MachineInstrIndex index,
unsigned vr, BitVector &RestoreMBBs,
DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
if (!RestoreMBBs[Id])
return false;
@@ -1978,15 +1999,15 @@ bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
BitVector &RestoreMBBs,
void LiveIntervals::eraseRestoreInfo(int Id, MachineInstrIndex index,
unsigned vr, BitVector &RestoreMBBs,
DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
if (!RestoreMBBs[Id])
return;
std::vector<SRInfo> &Restores = RestoreIdxes[Id];
for (unsigned i = 0, e = Restores.size(); i != e; ++i)
if (Restores[i].index == index && Restores[i].vreg)
Restores[i].index = -1;
Restores[i].index = MachineInstrIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
@@ -2085,17 +2106,19 @@ addIntervalsForSpillsFast(const LiveInterval &li,
}

// Fill in the new live interval.
unsigned index = getInstructionIndex(MI);
MachineInstrIndex index = getInstructionIndex(MI);
if (HasUse) {
LiveRange LR(getLoadIndex(index), getUseIndex(index),
nI.getNextValue(0, 0, false, getVNInfoAllocator()));
nI.getNextValue(MachineInstrIndex(), 0, false,
getVNInfoAllocator()));
DEBUG(errs() << " +" << LR);
nI.addRange(LR);
vrm.addRestorePoint(NewVReg, MI);
}
if (HasDef) {
LiveRange LR(getDefIndex(index), getStoreIndex(index),
nI.getNextValue(0, 0, false, getVNInfoAllocator()));
nI.getNextValue(MachineInstrIndex(), 0, false,
getVNInfoAllocator()));
DEBUG(errs() << " +" << LR);
nI.addRange(LR);
vrm.addSpillPoint(NewVReg, true, MI);
@@ -2158,8 +2181,8 @@ addIntervalsForSpills(const LiveInterval &li,
if (vrm.getPreSplitReg(li.reg)) {
vrm.setIsSplitFromReg(li.reg, 0);
// Unset the split kill marker on the last use.
unsigned KillIdx = vrm.getKillPoint(li.reg);
if (KillIdx) {
MachineInstrIndex KillIdx = vrm.getKillPoint(li.reg);
if (KillIdx != MachineInstrIndex()) {
MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
assert(KillMI && "Last use disappeared?");
int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
@@ -2287,7 +2310,7 @@ addIntervalsForSpills(const LiveInterval &li,
while (Id != -1) {
std::vector<SRInfo> &spills = SpillIdxes[Id];
for (unsigned i = 0, e = spills.size(); i != e; ++i) {
int index = spills[i].index;
MachineInstrIndex index = spills[i].index;
unsigned VReg = spills[i].vreg;
LiveInterval &nI = getOrCreateInterval(VReg);
bool isReMat = vrm.isReMaterialized(VReg);
@@ -2325,7 +2348,7 @@ addIntervalsForSpills(const LiveInterval &li,
if (FoundUse) {
// Also folded uses, do not issue a load.
eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
nI.removeRange(getLoadIndex(index), getUseIndex(index).nextSlot());
}
nI.removeRange(getDefIndex(index), getStoreIndex(index));
}
@@ -2350,8 +2373,8 @@ addIntervalsForSpills(const LiveInterval &li,
while (Id != -1) {
std::vector<SRInfo> &restores = RestoreIdxes[Id];
for (unsigned i = 0, e = restores.size(); i != e; ++i) {
int index = restores[i].index;
if (index == -1)
MachineInstrIndex index = restores[i].index;
if (index == MachineInstrIndex())
continue;
unsigned VReg = restores[i].vreg;
LiveInterval &nI = getOrCreateInterval(VReg);
@@ -2406,7 +2429,7 @@ addIntervalsForSpills(const LiveInterval &li,
// If folding is not possible / failed, then tell the spiller to issue a
// load / rematerialization for us.
if (Folded)
nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
nI.removeRange(getLoadIndex(index), getUseIndex(index).nextSlot());
else
vrm.addRestorePoint(VReg, MI);
}
@@ -2422,7 +2445,7 @@ addIntervalsForSpills(const LiveInterval &li,
LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
if (!AddedKill.count(LI)) {
LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
unsigned LastUseIdx = getBaseIndex(LR->end);
MachineInstrIndex LastUseIdx = getBaseIndex(LR->end);
MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
assert(UseIdx != -1);
@@ -2473,7 +2496,7 @@ unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
E = mri_->reg_end(); I != E; ++I) {
MachineOperand &O = I.getOperand();
MachineInstr *MI = O.getParent();
unsigned Index = getInstructionIndex(MI);
MachineInstrIndex Index = getInstructionIndex(MI);
if (pli.liveAt(Index))
++NumConflicts;
}
@@ -2504,11 +2527,11 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
if (SeenMIs.count(MI))
continue;
SeenMIs.insert(MI);
unsigned Index = getInstructionIndex(MI);
MachineInstrIndex Index = getInstructionIndex(MI);
if (pli.liveAt(Index)) {
vrm.addEmergencySpill(SpillReg, MI);
unsigned StartIdx = getLoadIndex(Index);
unsigned EndIdx = getStoreIndex(Index)+1;
MachineInstrIndex StartIdx = getLoadIndex(Index);
MachineInstrIndex EndIdx = getStoreIndex(Index).nextSlot();
if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
pli.removeRange(StartIdx, EndIdx);
Cut = true;
@@ -2528,7 +2551,7 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
continue;
LiveInterval &spli = getInterval(*AS);
if (spli.liveAt(Index))
spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
spli.removeRange(getLoadIndex(Index), getStoreIndex(Index).nextSlot());
}
}
}
@@ -2539,13 +2562,13 @@ LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
MachineInstr* startInst) {
LiveInterval& Interval = getOrCreateInterval(reg);
VNInfo* VN = Interval.getNextValue(
getInstructionIndex(startInst) + InstrSlots::DEF,
startInst, true, getVNInfoAllocator());
MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
startInst, true, getVNInfoAllocator());
VN->setHasPHIKill(true);
VN->kills.push_back(
VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
getMBBEndIdx(startInst->getParent()) + 1, VN);
VN->kills.push_back(terminatorGaps[startInst->getParent()]);
LiveRange LR(
MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
getMBBEndIdx(startInst->getParent()).nextSlot(), VN);
Interval.addRange(LR);

return LR;
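The PreAllocSplitting hunks below start keying DenseMaps by MachineInstrIndex (terminatorGaps, Def2SpillMap). Any custom DenseMap key needs a DenseMapInfo specialization supplying empty and tombstone keys, a hash, and equality; the specialization below shows that shape for a toy wrapper and is not the actual LLVM specialization for MachineInstrIndex (depending on the LLVM revision, further members may be required).

#include "llvm/ADT/DenseMap.h"

// Toy key type, for illustration only.
struct ToyIndex { unsigned Value; };

namespace llvm {
template <> struct DenseMapInfo<ToyIndex> {
  static ToyIndex getEmptyKey()     { return ToyIndex{~0u}; }      // never a real index
  static ToyIndex getTombstoneKey() { return ToyIndex{~0u - 1u}; } // never a real index
  static unsigned getHashValue(const ToyIndex &I) { return I.Value * 37u; }
  static bool isEqual(const ToyIndex &A, const ToyIndex &B) { return A.Value == B.Value; }
};
} // namespace llvm

void example() {
  llvm::DenseMap<ToyIndex, ToyIndex> Def2Spill;  // mirrors Def2SpillMap's shape
  Def2Spill[ToyIndex{8}] = ToyIndex{12};
}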
@@ -68,7 +68,7 @@ namespace {
MachineBasicBlock *BarrierMBB;

// Barrier - Current barrier index.
unsigned BarrierIdx;
MachineInstrIndex BarrierIdx;

// CurrLI - Current live interval being split.
LiveInterval *CurrLI;
@@ -83,7 +83,7 @@ namespace {
DenseMap<unsigned, int> IntervalSSMap;

// Def2SpillMap - A map from a def instruction index to spill index.
DenseMap<unsigned, unsigned> Def2SpillMap;
DenseMap<MachineInstrIndex, MachineInstrIndex> Def2SpillMap;

public:
static char ID;
@@ -129,22 +129,23 @@ namespace {
private:
MachineBasicBlock::iterator
findNextEmptySlot(MachineBasicBlock*, MachineInstr*,
unsigned&);
MachineInstrIndex&);

MachineBasicBlock::iterator
findSpillPoint(MachineBasicBlock*, MachineInstr*, MachineInstr*,
SmallPtrSet<MachineInstr*, 4>&, unsigned&);
SmallPtrSet<MachineInstr*, 4>&, MachineInstrIndex&);

MachineBasicBlock::iterator
findRestorePoint(MachineBasicBlock*, MachineInstr*, unsigned,
SmallPtrSet<MachineInstr*, 4>&, unsigned&);
findRestorePoint(MachineBasicBlock*, MachineInstr*, MachineInstrIndex,
SmallPtrSet<MachineInstr*, 4>&, MachineInstrIndex&);

int CreateSpillStackSlot(unsigned, const TargetRegisterClass *);

bool IsAvailableInStack(MachineBasicBlock*, unsigned, unsigned, unsigned,
unsigned&, int&) const;
bool IsAvailableInStack(MachineBasicBlock*, unsigned,
MachineInstrIndex, MachineInstrIndex,
MachineInstrIndex&, int&) const;

void UpdateSpillSlotInterval(VNInfo*, unsigned, unsigned);
void UpdateSpillSlotInterval(VNInfo*, MachineInstrIndex, MachineInstrIndex);

bool SplitRegLiveInterval(LiveInterval*);

@@ -156,7 +157,7 @@ namespace {
bool Rematerialize(unsigned vreg, VNInfo* ValNo,
MachineInstr* DefMI,
MachineBasicBlock::iterator RestorePt,
unsigned RestoreIdx,
MachineInstrIndex RestoreIdx,
SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
MachineInstr* FoldSpill(unsigned vreg, const TargetRegisterClass* RC,
MachineInstr* DefMI,
@@ -208,11 +209,12 @@ const PassInfo *const llvm::PreAllocSplittingID = &X;
/// instruction index map. If there isn't one, return end().
MachineBasicBlock::iterator
PreAllocSplitting::findNextEmptySlot(MachineBasicBlock *MBB, MachineInstr *MI,
unsigned &SpotIndex) {
MachineInstrIndex &SpotIndex) {
MachineBasicBlock::iterator MII = MI;
if (++MII != MBB->end()) {
unsigned Index = LIs->findGapBeforeInstr(LIs->getInstructionIndex(MII));
if (Index) {
MachineInstrIndex Index =
LIs->findGapBeforeInstr(LIs->getInstructionIndex(MII));
if (Index != MachineInstrIndex()) {
SpotIndex = Index;
return MII;
}
@@ -228,7 +230,7 @@ MachineBasicBlock::iterator
PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
MachineInstr *DefMI,
SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
unsigned &SpillIndex) {
MachineInstrIndex &SpillIndex) {
MachineBasicBlock::iterator Pt = MBB->begin();

MachineBasicBlock::iterator MII = MI;
@@ -241,7 +243,7 @@ PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
if (MII == EndPt || RefsInMBB.count(MII)) return Pt;

while (MII != EndPt && !RefsInMBB.count(MII)) {
unsigned Index = LIs->getInstructionIndex(MII);
MachineInstrIndex Index = LIs->getInstructionIndex(MII);

// We can't insert the spill between the barrier (a call), and its
// corresponding call frame setup.
@@ -274,9 +276,9 @@ PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
/// found.
MachineBasicBlock::iterator
PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
unsigned LastIdx,
MachineInstrIndex LastIdx,
SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
unsigned &RestoreIndex) {
MachineInstrIndex &RestoreIndex) {
// FIXME: Allow spill to be inserted to the beginning of the mbb. Update mbb
// begin index accordingly.
MachineBasicBlock::iterator Pt = MBB->end();
@@ -297,10 +299,10 @@ PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
// FIXME: Limit the number of instructions to examine to reduce
// compile time?
while (MII != EndPt) {
unsigned Index = LIs->getInstructionIndex(MII);
MachineInstrIndex Index = LIs->getInstructionIndex(MII);
if (Index > LastIdx)
break;
unsigned Gap = LIs->findGapBeforeInstr(Index);
MachineInstrIndex Gap = LIs->findGapBeforeInstr(Index);

// We can't insert a restore between the barrier (a call) and its
// corresponding call frame teardown.
@@ -309,7 +311,7 @@ PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
++MII;
} while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
} else if (Gap) {
} else if (Gap != MachineInstrIndex()) {
Pt = MII;
RestoreIndex = Gap;
}
@@ -342,7 +344,8 @@ int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
if (CurrSLI->hasAtLeastOneValue())
CurrSValNo = CurrSLI->getValNumInfo(0);
else
CurrSValNo = CurrSLI->getNextValue(0, 0, false, LSs->getVNInfoAllocator());
CurrSValNo = CurrSLI->getNextValue(MachineInstrIndex(), 0, false,
LSs->getVNInfoAllocator());
return SS;
}

@@ -350,8 +353,9 @@ int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
/// slot at the specified index.
bool
PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
unsigned Reg, unsigned DefIndex,
unsigned RestoreIndex, unsigned &SpillIndex,
unsigned Reg, MachineInstrIndex DefIndex,
MachineInstrIndex RestoreIndex,
MachineInstrIndex &SpillIndex,
int& SS) const {
if (!DefMBB)
return false;
@@ -359,7 +363,8 @@ PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
if (I == IntervalSSMap.end())
return false;
DenseMap<unsigned, unsigned>::iterator II = Def2SpillMap.find(DefIndex);
DenseMap<MachineInstrIndex, MachineInstrIndex>::iterator
II = Def2SpillMap.find(DefIndex);
if (II == Def2SpillMap.end())
return false;

@@ -379,8 +384,8 @@ PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
/// interval being split, and the spill and restore indicies, update the live
/// interval of the spill stack slot.
void
PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, unsigned SpillIndex,
unsigned RestoreIndex) {
PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, MachineInstrIndex SpillIndex,
MachineInstrIndex RestoreIndex) {
assert(LIs->getMBBFromIndex(RestoreIndex) == BarrierMBB &&
"Expect restore in the barrier mbb");

@@ -393,8 +398,8 @@ PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, unsigned SpillIndex,
}

SmallPtrSet<MachineBasicBlock*, 4> Processed;
unsigned EndIdx = LIs->getMBBEndIdx(MBB);
LiveRange SLR(SpillIndex, EndIdx+1, CurrSValNo);
MachineInstrIndex EndIdx = LIs->getMBBEndIdx(MBB);
LiveRange SLR(SpillIndex, LIs->getNextSlot(EndIdx), CurrSValNo);
CurrSLI->addRange(SLR);
Processed.insert(MBB);

@@ -413,7 +418,7 @@ PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, unsigned SpillIndex,
WorkList.pop_back();
if (Processed.count(MBB))
continue;
unsigned Idx = LIs->getMBBStartIdx(MBB);
MachineInstrIndex Idx = LIs->getMBBStartIdx(MBB);
LR = CurrLI->getLiveRangeContaining(Idx);
if (LR && LR->valno == ValNo) {
EndIdx = LIs->getMBBEndIdx(MBB);
@@ -423,7 +428,7 @@ PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, unsigned SpillIndex,
CurrSLI->addRange(SLR);
} else if (LR->end > EndIdx) {
// Live range extends beyond end of mbb, process successors.
LiveRange SLR(Idx, EndIdx+1, CurrSValNo);
LiveRange SLR(Idx, LIs->getNextIndex(EndIdx), CurrSValNo);
CurrSLI->addRange(SLR);
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
SE = MBB->succ_end(); SI != SE; ++SI)
@@ -486,12 +491,12 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
}

// Once we've found it, extend its VNInfo to our instruction.
unsigned DefIndex = LIs->getInstructionIndex(Walker);
MachineInstrIndex DefIndex = LIs->getInstructionIndex(Walker);
DefIndex = LiveIntervals::getDefIndex(DefIndex);
unsigned EndIndex = LIs->getMBBEndIdx(MBB);
MachineInstrIndex EndIndex = LIs->getMBBEndIdx(MBB);

RetVNI = NewVNs[Walker];
LI->addRange(LiveRange(DefIndex, EndIndex+1, RetVNI));
LI->addRange(LiveRange(DefIndex, LIs->getNextSlot(EndIndex), RetVNI));
} else if (!ContainsDefs && ContainsUses) {
SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];

@@ -523,9 +528,9 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
IsTopLevel, IsIntraBlock);
}

unsigned UseIndex = LIs->getInstructionIndex(Walker);
MachineInstrIndex UseIndex = LIs->getInstructionIndex(Walker);
UseIndex = LiveIntervals::getUseIndex(UseIndex);
unsigned EndIndex = 0;
MachineInstrIndex EndIndex;
if (IsIntraBlock) {
EndIndex = LIs->getInstructionIndex(UseI);
EndIndex = LiveIntervals::getUseIndex(EndIndex);
@@ -537,12 +542,12 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
NewVNs, LiveOut, Phis, false, true);

LI->addRange(LiveRange(UseIndex, EndIndex+1, RetVNI));
LI->addRange(LiveRange(UseIndex, LIs->getNextSlot(EndIndex), RetVNI));

// FIXME: Need to set kills properly for inter-block stuff.
if (LI->isKill(RetVNI, UseIndex)) LI->removeKill(RetVNI, UseIndex);
if (RetVNI->isKill(UseIndex)) RetVNI->removeKill(UseIndex);
if (IsIntraBlock)
LI->addKill(RetVNI, EndIndex, false);
RetVNI->addKill(EndIndex);
} else if (ContainsDefs && ContainsUses) {
SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];
@@ -583,10 +588,10 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
IsTopLevel, IsIntraBlock);
}

unsigned StartIndex = LIs->getInstructionIndex(Walker);
MachineInstrIndex StartIndex = LIs->getInstructionIndex(Walker);
StartIndex = foundDef ? LiveIntervals::getDefIndex(StartIndex) :
LiveIntervals::getUseIndex(StartIndex);
unsigned EndIndex = 0;
MachineInstrIndex EndIndex;
if (IsIntraBlock) {
EndIndex = LIs->getInstructionIndex(UseI);
EndIndex = LiveIntervals::getUseIndex(EndIndex);
@@ -599,12 +604,12 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
NewVNs, LiveOut, Phis, false, true);

LI->addRange(LiveRange(StartIndex, EndIndex+1, RetVNI));
LI->addRange(LiveRange(StartIndex, LIs->getNextSlot(EndIndex), RetVNI));

if (foundUse && LI->isKill(RetVNI, StartIndex))
LI->removeKill(RetVNI, StartIndex);
if (foundUse && RetVNI->isKill(StartIndex))
RetVNI->removeKill(StartIndex);
if (IsIntraBlock) {
LI->addKill(RetVNI, EndIndex, false);
RetVNI->addKill(EndIndex);
}
}

@@ -635,9 +640,10 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
// assume that we are not intrablock here.
if (Phis.count(MBB)) return Phis[MBB];

unsigned StartIndex = LIs->getMBBStartIdx(MBB);
MachineInstrIndex StartIndex = LIs->getMBBStartIdx(MBB);
VNInfo *RetVNI = Phis[MBB] =
LI->getNextValue(0, /*FIXME*/ 0, false, LIs->getVNInfoAllocator());
LI->getNextValue(MachineInstrIndex(), /*FIXME*/ 0, false,
LIs->getVNInfoAllocator());

if (!IsIntraBlock) LiveOut[MBB] = RetVNI;

@@ -679,21 +685,21 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
I->second->setHasPHIKill(true);
unsigned KillIndex = LIs->getMBBEndIdx(I->first);
if (!LiveInterval::isKill(I->second, KillIndex))
LI->addKill(I->second, KillIndex, false);
MachineInstrIndex KillIndex = LIs->getMBBEndIdx(I->first);
if (!I->second->isKill(KillIndex))
I->second->addKill(KillIndex);
}
}

unsigned EndIndex = 0;
MachineInstrIndex EndIndex;
if (IsIntraBlock) {
EndIndex = LIs->getInstructionIndex(UseI);
EndIndex = LiveIntervals::getUseIndex(EndIndex);
} else
EndIndex = LIs->getMBBEndIdx(MBB);
LI->addRange(LiveRange(StartIndex, EndIndex+1, RetVNI));
LI->addRange(LiveRange(StartIndex, LIs->getNextSlot(EndIndex), RetVNI));
if (IsIntraBlock)
LI->addKill(RetVNI, EndIndex, false);
RetVNI->addKill(EndIndex);

// Memoize results so we don't have to recompute them.
if (!IsIntraBlock)
@@ -727,7 +733,7 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
DE = MRI->def_end(); DI != DE; ++DI) {
Defs[(*DI).getParent()].insert(&*DI);

unsigned DefIdx = LIs->getInstructionIndex(&*DI);
MachineInstrIndex DefIdx = LIs->getInstructionIndex(&*DI);
DefIdx = LiveIntervals::getDefIndex(DefIdx);

assert(DI->getOpcode() != TargetInstrInfo::PHI &&
@@ -763,14 +769,14 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
// Add ranges for dead defs
for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
DE = MRI->def_end(); DI != DE; ++DI) {
unsigned DefIdx = LIs->getInstructionIndex(&*DI);
MachineInstrIndex DefIdx = LIs->getInstructionIndex(&*DI);
DefIdx = LiveIntervals::getDefIndex(DefIdx);

if (LI->liveAt(DefIdx)) continue;

VNInfo* DeadVN = NewVNs[&*DI];
LI->addRange(LiveRange(DefIdx, DefIdx+1, DeadVN));
LI->addKill(DeadVN, DefIdx, false);
LI->addRange(LiveRange(DefIdx, LIs->getNextSlot(DefIdx), DeadVN));
DeadVN->addKill(DefIdx);
}
}

@@ -802,13 +808,15 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
// Locate two-address redefinitions
for (VNInfo::KillSet::iterator KI = OldVN->kills.begin(),
KE = OldVN->kills.end(); KI != KE; ++KI) {
assert(!KI->isPHIKill && "VN previously reported having no PHI kills.");
MachineInstr* MI = LIs->getInstructionFromIndex(KI->killIdx);
assert(!KI->isPHIIndex() &&
"VN previously reported having no PHI kills.");
MachineInstr* MI = LIs->getInstructionFromIndex(*KI);
unsigned DefIdx = MI->findRegisterDefOperandIdx(CurrLI->reg);
if (DefIdx == ~0U) continue;
if (MI->isRegTiedToUseOperand(DefIdx)) {
|
||||
VNInfo* NextVN =
|
||||
CurrLI->findDefinedVNInfo(LiveIntervals::getDefIndex(KI->killIdx));
|
||||
CurrLI->findDefinedVNInfoForRegInt(
|
||||
LiveIntervals::getDefIndex(*KI));
|
||||
if (NextVN == OldVN) continue;
|
||||
Stack.push_back(NextVN);
|
||||
}
|
||||
@ -840,7 +848,7 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
|
||||
for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
|
||||
E = MRI->reg_end(); I != E; ++I) {
|
||||
MachineOperand& MO = I.getOperand();
|
||||
unsigned InstrIdx = LIs->getInstructionIndex(&*I);
|
||||
MachineInstrIndex InstrIdx = LIs->getInstructionIndex(&*I);
|
||||
|
||||
if ((MO.isUse() && NewLI.liveAt(LiveIntervals::getUseIndex(InstrIdx))) ||
|
||||
(MO.isDef() && NewLI.liveAt(LiveIntervals::getDefIndex(InstrIdx))))
|
||||
@ -868,12 +876,12 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
|
||||
bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
|
||||
MachineInstr* DefMI,
|
||||
MachineBasicBlock::iterator RestorePt,
|
||||
unsigned RestoreIdx,
|
||||
MachineInstrIndex RestoreIdx,
|
||||
SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
|
||||
MachineBasicBlock& MBB = *RestorePt->getParent();
|
||||
|
||||
MachineBasicBlock::iterator KillPt = BarrierMBB->end();
|
||||
unsigned KillIdx = 0;
|
||||
MachineInstrIndex KillIdx;
|
||||
if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB)
|
||||
KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, KillIdx);
|
||||
else
|
||||
@ -886,9 +894,9 @@ bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
|
||||
LIs->InsertMachineInstrInMaps(prior(RestorePt), RestoreIdx);
|
||||
|
||||
ReconstructLiveInterval(CurrLI);
|
||||
unsigned RematIdx = LIs->getInstructionIndex(prior(RestorePt));
|
||||
MachineInstrIndex RematIdx = LIs->getInstructionIndex(prior(RestorePt));
|
||||
RematIdx = LiveIntervals::getDefIndex(RematIdx);
|
||||
RenumberValno(CurrLI->findDefinedVNInfo(RematIdx));
|
||||
RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RematIdx));
|
||||
|
||||
++NumSplits;
|
||||
++NumRemats;
|
||||
@ -943,7 +951,8 @@ MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
|
||||
if (CurrSLI->hasAtLeastOneValue())
|
||||
CurrSValNo = CurrSLI->getValNumInfo(0);
|
||||
else
|
||||
CurrSValNo = CurrSLI->getNextValue(0, 0, false, LSs->getVNInfoAllocator());
|
||||
CurrSValNo = CurrSLI->getNextValue(MachineInstrIndex(), 0, false,
|
||||
LSs->getVNInfoAllocator());
|
||||
}
|
||||
|
||||
return FMI;
|
||||
@ -1052,7 +1061,7 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
|
||||
}
|
||||
|
||||
// Find a point to restore the value after the barrier.
|
||||
unsigned RestoreIndex = 0;
|
||||
MachineInstrIndex RestoreIndex;
|
||||
MachineBasicBlock::iterator RestorePt =
|
||||
findRestorePoint(BarrierMBB, Barrier, LR->end, RefsInMBB, RestoreIndex);
|
||||
if (RestorePt == BarrierMBB->end())
|
||||
@ -1066,7 +1075,7 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
|
||||
// Add a spill either before the barrier or after the definition.
|
||||
MachineBasicBlock *DefMBB = DefMI ? DefMI->getParent() : NULL;
|
||||
const TargetRegisterClass *RC = MRI->getRegClass(CurrLI->reg);
|
||||
unsigned SpillIndex = 0;
|
||||
MachineInstrIndex SpillIndex;
|
||||
MachineInstr *SpillMI = NULL;
|
||||
int SS = -1;
|
||||
if (!ValNo->isDefAccurate()) {
|
||||
@ -1138,15 +1147,15 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
|
||||
}
|
||||
|
||||
// Update spill stack slot live interval.
|
||||
UpdateSpillSlotInterval(ValNo, LIs->getUseIndex(SpillIndex)+1,
|
||||
UpdateSpillSlotInterval(ValNo, LIs->getNextSlot(LIs->getUseIndex(SpillIndex)),
|
||||
LIs->getDefIndex(RestoreIndex));
|
||||
|
||||
ReconstructLiveInterval(CurrLI);
|
||||
|
||||
if (!FoldedRestore) {
|
||||
unsigned RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
|
||||
MachineInstrIndex RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
|
||||
RestoreIdx = LiveIntervals::getDefIndex(RestoreIdx);
|
||||
RenumberValno(CurrLI->findDefinedVNInfo(RestoreIdx));
|
||||
RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RestoreIdx));
|
||||
}
|
||||
|
||||
++NumSplits;
|
||||
@ -1232,7 +1241,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
|
||||
// reaching definition (VNInfo).
|
||||
for (MachineRegisterInfo::use_iterator UI = MRI->use_begin((*LI)->reg),
|
||||
UE = MRI->use_end(); UI != UE; ++UI) {
|
||||
unsigned index = LIs->getInstructionIndex(&*UI);
|
||||
MachineInstrIndex index = LIs->getInstructionIndex(&*UI);
|
||||
index = LiveIntervals::getUseIndex(index);
|
||||
|
||||
const LiveRange* LR = (*LI)->getLiveRangeContaining(index);
|
||||
@ -1382,7 +1391,7 @@ bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
|
||||
if (LR->valno->hasPHIKill())
|
||||
return false;
|
||||
|
||||
unsigned MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
|
||||
MachineInstrIndex MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
|
||||
if (LR->end < MBBEnd)
|
||||
return false;
|
||||
|
||||
|
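A minimal, self-contained sketch of the pattern the hunks above follow (purely illustrative; ToyIndex, nextSlot and prevSlot are invented names, not the real LLVM API): raw unsigned indexes and arithmetic such as EndIndex+1 are replaced by an opaque index type stepped through named helpers.

// Illustrative sketch only, not part of the patch: an opaque index wrapper
// with next/prev-slot helpers, mirroring the way "EndIndex+1" becomes
// "LIs->getNextSlot(EndIndex)" in the hunks above. All names are invented.
#include <cassert>
#include <iostream>

class ToyIndex {
  unsigned index;
public:
  explicit ToyIndex(unsigned i = 0) : index(i) {}
  unsigned raw() const { return index; }
  bool operator<(ToyIndex other) const { return index < other.index; }
};

// Clients step between adjacent slots through helpers instead of doing
// "+1"/"-1" on a bare unsigned, so the numbering scheme can change later
// without touching every caller.
ToyIndex nextSlot(ToyIndex i) { return ToyIndex(i.raw() + 1); }
ToyIndex prevSlot(ToyIndex i) { assert(i.raw() > 0); return ToyIndex(i.raw() - 1); }

int main() {
  ToyIndex endIndex(42);
  // Old style: LiveRange(DefIndex, EndIndex + 1, ...)
  // New style: LiveRange(DefIndex, getNextSlot(EndIndex), ...)
  std::cout << nextSlot(endIndex).raw() << "\n"; // prints 43
  return 0;
}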
@ -176,11 +176,11 @@ namespace {

/// processActiveIntervals - expire old intervals and move non-overlapping
/// ones to the inactive list.
void processActiveIntervals(unsigned CurPoint);
void processActiveIntervals(MachineInstrIndex CurPoint);

/// processInactiveIntervals - expire old intervals and move overlapping
/// ones to the active list.
void processInactiveIntervals(unsigned CurPoint);
void processInactiveIntervals(MachineInstrIndex CurPoint);

/// hasNextReloadInterval - Return the next liveinterval that's being
/// defined by a reload from the same SS as the specified one.
@ -366,7 +366,8 @@ unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
return Reg;

VNInfo *vni = cur.begin()->valno;
if (!vni->def || vni->isUnused() || !vni->isDefAccurate())
if ((vni->def == MachineInstrIndex()) ||
vni->isUnused() || !vni->isDefAccurate())
return Reg;
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg, PhysReg;
@ -503,8 +504,8 @@ void RALinScan::linearScan() {
DEBUG(errs() << "\n*** CURRENT ***: " << *cur << '\n');

if (!cur->empty()) {
processActiveIntervals(cur->beginNumber());
processInactiveIntervals(cur->beginNumber());
processActiveIntervals(cur->beginIndex());
processInactiveIntervals(cur->beginIndex());

assert(TargetRegisterInfo::isVirtualRegister(cur->reg) &&
"Can only allocate virtual registers!");
@ -585,7 +586,7 @@ void RALinScan::linearScan() {

/// processActiveIntervals - expire old intervals and move non-overlapping ones
/// to the inactive list.
void RALinScan::processActiveIntervals(unsigned CurPoint)
void RALinScan::processActiveIntervals(MachineInstrIndex CurPoint)
{
DEBUG(errs() << "\tprocessing active intervals:\n");

@ -631,7 +632,7 @@ void RALinScan::processActiveIntervals(unsigned CurPoint)

/// processInactiveIntervals - expire old intervals and move overlapping
/// ones to the active list.
void RALinScan::processInactiveIntervals(unsigned CurPoint)
void RALinScan::processInactiveIntervals(MachineInstrIndex CurPoint)
{
DEBUG(errs() << "\tprocessing inactive intervals:\n");

@ -712,7 +713,7 @@ FindIntervalInVector(RALinScan::IntervalPtrs &IP, LiveInterval *LI) {
return IP.end();
}

static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V, unsigned Point){
static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V, MachineInstrIndex Point){
for (unsigned i = 0, e = V.size(); i != e; ++i) {
RALinScan::IntervalPtr &IP = V[i];
LiveInterval::iterator I = std::upper_bound(IP.first->begin(),
@ -738,7 +739,8 @@ static void addStackInterval(LiveInterval *cur, LiveStacks *ls_,
if (SI.hasAtLeastOneValue())
VNI = SI.getValNumInfo(0);
else
VNI = SI.getNextValue(0, 0, false, ls_->getVNInfoAllocator());
VNI = SI.getNextValue(MachineInstrIndex(), 0, false,
ls_->getVNInfoAllocator());

LiveInterval &RI = li_->getInterval(cur->reg);
// FIXME: This may be overly conservative.
@ -880,7 +882,7 @@ void RALinScan::UpgradeRegister(unsigned Reg) {
namespace {
struct LISorter {
bool operator()(LiveInterval* A, LiveInterval* B) {
return A->beginNumber() < B->beginNumber();
return A->beginIndex() < B->beginIndex();
}
};
}
@ -905,7 +907,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
backUpRegUses();

std::vector<std::pair<unsigned, float> > SpillWeightsToAdd;
unsigned StartPosition = cur->beginNumber();
MachineInstrIndex StartPosition = cur->beginIndex();
const TargetRegisterClass *RCLeader = RelatedRegClasses.getLeaderValue(RC);

// If start of this live interval is defined by a move instruction and its
@ -915,7 +917,8 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
// one, e.g. X86::mov32to32_. These move instructions are not coalescable.
if (!vrm_->getRegAllocPref(cur->reg) && cur->hasAtLeastOneValue()) {
VNInfo *vni = cur->begin()->valno;
if (vni->def && !vni->isUnused() && vni->isDefAccurate()) {
if ((vni->def != MachineInstrIndex()) && !vni->isUnused() &&
vni->isDefAccurate()) {
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (CopyMI &&
@ -977,7 +980,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
// Okay, this reg is on the fixed list. Check to see if we actually
// conflict.
LiveInterval *I = IP.first;
if (I->endNumber() > StartPosition) {
if (I->endIndex() > StartPosition) {
LiveInterval::iterator II = I->advanceTo(IP.second, StartPosition);
IP.second = II;
if (II != I->begin() && II->start > StartPosition)
@ -1002,7 +1005,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {

const TargetRegisterClass *RegRC = OneClassForEachPhysReg[I->reg];
if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
I->endNumber() > StartPosition) {
I->endIndex() > StartPosition) {
LiveInterval::iterator II = I->advanceTo(IP.second, StartPosition);
IP.second = II;
if (II != I->begin() && II->start > StartPosition)
@ -1170,14 +1173,14 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
LiveInterval *ReloadLi = added[i];
if (ReloadLi->weight == HUGE_VALF &&
li_->getApproximateInstructionCount(*ReloadLi) == 0) {
unsigned ReloadIdx = ReloadLi->beginNumber();
MachineInstrIndex ReloadIdx = ReloadLi->beginIndex();
MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
// Last reload of same SS is in the same MBB. We want to try to
// allocate both reloads the same register and make sure the reg
// isn't clobbered in between if at all possible.
assert(LastReload->beginNumber() < ReloadIdx);
assert(LastReload->beginIndex() < ReloadIdx);
NextReloadMap.insert(std::make_pair(LastReload->reg, ReloadLi->reg));
}
LastReloadMBB = ReloadMBB;
@ -1226,7 +1229,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
spillIs.pop_back();
DEBUG(errs() << "\t\t\tspilling(a): " << *sli << '\n');
earliestStartInterval =
(earliestStartInterval->beginNumber() < sli->beginNumber()) ?
(earliestStartInterval->beginIndex() < sli->beginIndex()) ?
earliestStartInterval : sli;

std::vector<LiveInterval*> newIs;
@ -1240,7 +1243,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
spilled.insert(sli->reg);
}

unsigned earliestStart = earliestStartInterval->beginNumber();
MachineInstrIndex earliestStart = earliestStartInterval->beginIndex();

DEBUG(errs() << "\t\trolling back to: " << earliestStart << '\n');

@ -1250,7 +1253,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
while (!handled_.empty()) {
LiveInterval* i = handled_.back();
// If this interval starts before t we are done.
if (i->beginNumber() < earliestStart)
if (i->beginIndex() < earliestStart)
break;
DEBUG(errs() << "\t\t\tundo changes for: " << *i << '\n');
handled_.pop_back();
@ -1301,7 +1304,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
for (unsigned i = 0, e = handled_.size(); i != e; ++i) {
LiveInterval *HI = handled_[i];
if (!HI->expiredAt(earliestStart) &&
HI->expiredAt(cur->beginNumber())) {
HI->expiredAt(cur->beginIndex())) {
DEBUG(errs() << "\t\t\tundo changes for: " << *HI << '\n');
active_.push_back(std::make_pair(HI, HI->begin()));
assert(!TargetRegisterInfo::isPhysicalRegister(HI->reg));
@ -1321,14 +1324,14 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
LiveInterval *ReloadLi = added[i];
if (ReloadLi->weight == HUGE_VALF &&
li_->getApproximateInstructionCount(*ReloadLi) == 0) {
unsigned ReloadIdx = ReloadLi->beginNumber();
MachineInstrIndex ReloadIdx = ReloadLi->beginIndex();
MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
// Last reload of same SS is in the same MBB. We want to try to
// allocate both reloads the same register and make sure the reg
// isn't clobbered in between if at all possible.
assert(LastReload->beginNumber() < ReloadIdx);
assert(LastReload->beginIndex() < ReloadIdx);
NextReloadMap.insert(std::make_pair(LastReload->reg, ReloadLi->reg));
}
LastReloadMBB = ReloadMBB;

@ -683,7 +683,8 @@ void PBQPRegAlloc::addStackInterval(const LiveInterval *spilled,
if (stackInterval.getNumValNums() != 0)
vni = stackInterval.getValNumInfo(0);
else
vni = stackInterval.getNextValue(0, 0, false, lss->getVNInfoAllocator());
vni = stackInterval.getNextValue(
MachineInstrIndex(), 0, false, lss->getVNInfoAllocator());

LiveInterval &rhsInterval = lis->getInterval(spilled->reg);
stackInterval.MergeRangesInAsValue(rhsInterval, vni);

@ -108,7 +108,7 @@ void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
LiveInterval &IntB,
MachineInstr *CopyMI) {
unsigned CopyIdx = li_->getDefIndex(li_->getInstructionIndex(CopyMI));
MachineInstrIndex CopyIdx = li_->getDefIndex(li_->getInstructionIndex(CopyMI));

// BValNo is a value number in B that is defined by a copy from A. 'B3' in
// the example above.
@ -123,7 +123,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");

// AValNo is the value number in A that defines the copy, A3 in the example.
unsigned CopyUseIdx = li_->getUseIndex(CopyIdx);
MachineInstrIndex CopyUseIdx = li_->getUseIndex(CopyIdx);
LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
assert(ALR != IntA.end() && "Live range not found!");
VNInfo *AValNo = ALR->valno;
@ -160,12 +160,14 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
if (SrcReg != IntB.reg) return false;

// Get the LiveRange in IntB that this value number starts with.
LiveInterval::iterator ValLR = IntB.FindLiveRangeContaining(AValNo->def-1);
LiveInterval::iterator ValLR =
IntB.FindLiveRangeContaining(li_->getPrevSlot(AValNo->def));
assert(ValLR != IntB.end() && "Live range not found!");

// Make sure that the end of the live range is inside the same block as
// CopyMI.
MachineInstr *ValLREndInst = li_->getInstructionFromIndex(ValLR->end-1);
MachineInstr *ValLREndInst =
li_->getInstructionFromIndex(li_->getPrevSlot(ValLR->end));
if (!ValLREndInst ||
ValLREndInst->getParent() != CopyMI->getParent()) return false;

@ -194,7 +196,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
IntB.print(errs(), tri_);
});

unsigned FillerStart = ValLR->end, FillerEnd = BLR->start;
MachineInstrIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
// We are about to delete CopyMI, so need to remove it as the 'instruction
// that defines this value #'. Update the valnum with the new defining
// instruction #.
@ -233,7 +235,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true);
if (UIdx != -1) {
ValLREndInst->getOperand(UIdx).setIsKill(false);
IntB.removeKill(ValLR->valno, FillerStart);
ValLR->valno->removeKill(FillerStart);
}

// If the copy instruction was killing the destination register before the
@ -297,7 +299,8 @@ bool SimpleRegisterCoalescing::HasOtherReachingDefs(LiveInterval &IntA,
bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
LiveInterval &IntB,
MachineInstr *CopyMI) {
unsigned CopyIdx = li_->getDefIndex(li_->getInstructionIndex(CopyMI));
MachineInstrIndex CopyIdx =
li_->getDefIndex(li_->getInstructionIndex(CopyMI));

// FIXME: For now, only eliminate the copy by commuting its def when the
// source register is a virtual register. We want to guard against cases
@ -319,7 +322,9 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");

// AValNo is the value number in A that defines the copy, A3 in the example.
LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyIdx-1);
LiveInterval::iterator ALR =
IntA.FindLiveRangeContaining(li_->getPrevSlot(CopyIdx));

assert(ALR != IntA.end() && "Live range not found!");
VNInfo *AValNo = ALR->valno;
// If other defs can reach uses of this def, then it's not safe to perform
@ -364,7 +369,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg),
UE = mri_->use_end(); UI != UE; ++UI) {
MachineInstr *UseMI = &*UI;
unsigned UseIdx = li_->getInstructionIndex(UseMI);
MachineInstrIndex UseIdx = li_->getInstructionIndex(UseMI);
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
if (ULR == IntA.end())
continue;
@ -389,7 +394,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
bool BHasPHIKill = BValNo->hasPHIKill();
SmallVector<VNInfo*, 4> BDeadValNos;
VNInfo::KillSet BKills;
std::map<unsigned, unsigned> BExtend;
std::map<MachineInstrIndex, MachineInstrIndex> BExtend;

// If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g.
// A = or A, B
@ -416,7 +421,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
++UI;
if (JoinedCopies.count(UseMI))
continue;
unsigned UseIdx = li_->getInstructionIndex(UseMI);
MachineInstrIndex UseIdx = li_->getInstructionIndex(UseMI);
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
if (ULR == IntA.end() || ULR->valno != AValNo)
continue;
@ -427,7 +432,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
if (Extended)
UseMO.setIsKill(false);
else
BKills.push_back(VNInfo::KillInfo(false, li_->getUseIndex(UseIdx)+1));
BKills.push_back(li_->getNextSlot(li_->getUseIndex(UseIdx)));
}
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
if (!tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
@ -436,7 +441,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
// This copy will become a noop. If it's defining a new val#,
// remove that val# as well. However this live range is being
// extended to the end of the existing live range defined by the copy.
unsigned DefIdx = li_->getDefIndex(UseIdx);
MachineInstrIndex DefIdx = li_->getDefIndex(UseIdx);
const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx);
BHasPHIKill |= DLR->valno->hasPHIKill();
assert(DLR->valno->def == DefIdx);
@ -476,16 +481,16 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
ValNo->def = AValNo->def;
ValNo->setCopy(0);
for (unsigned j = 0, ee = ValNo->kills.size(); j != ee; ++j) {
unsigned Kill = ValNo->kills[j].killIdx;
if (Kill != BLR->end)
BKills.push_back(VNInfo::KillInfo(ValNo->kills[j].isPHIKill, Kill));
if (ValNo->kills[j] != BLR->end)
BKills.push_back(ValNo->kills[j]);
}
ValNo->kills.clear();
for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
AI != AE; ++AI) {
if (AI->valno != AValNo) continue;
unsigned End = AI->end;
std::map<unsigned, unsigned>::iterator EI = BExtend.find(End);
MachineInstrIndex End = AI->end;
std::map<MachineInstrIndex, MachineInstrIndex>::iterator
EI = BExtend.find(End);
if (EI != BExtend.end())
End = EI->second;
IntB.addRange(LiveRange(AI->start, End, ValNo));
@ -538,7 +543,8 @@ static bool isSameOrFallThroughBB(MachineBasicBlock *MBB,
/// removeRange - Wrapper for LiveInterval::removeRange. This removes a range
/// from a physical register live interval as well as from the live intervals
/// of its sub-registers.
static void removeRange(LiveInterval &li, unsigned Start, unsigned End,
static void removeRange(LiveInterval &li,
MachineInstrIndex Start, MachineInstrIndex End,
LiveIntervals *li_, const TargetRegisterInfo *tri_) {
li.removeRange(Start, End, true);
if (TargetRegisterInfo::isPhysicalRegister(li.reg)) {
@ -546,7 +552,7 @@ static void removeRange(LiveInterval &li, unsigned Start, unsigned End,
if (!li_->hasInterval(*SR))
continue;
LiveInterval &sli = li_->getInterval(*SR);
unsigned RemoveEnd = Start;
MachineInstrIndex RemoveEnd = Start;
while (RemoveEnd != End) {
LiveInterval::iterator LR = sli.FindLiveRangeContaining(Start);
if (LR == sli.end())
@ -563,14 +569,14 @@ static void removeRange(LiveInterval &li, unsigned Start, unsigned End,
/// as the copy instruction, trim the live interval to the last use and return
/// true.
bool
SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(unsigned CopyIdx,
SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(MachineInstrIndex CopyIdx,
MachineBasicBlock *CopyMBB,
LiveInterval &li,
const LiveRange *LR) {
unsigned MBBStart = li_->getMBBStartIdx(CopyMBB);
unsigned LastUseIdx;
MachineOperand *LastUse = lastRegisterUse(LR->start, CopyIdx-1, li.reg,
LastUseIdx);
MachineInstrIndex MBBStart = li_->getMBBStartIdx(CopyMBB);
MachineInstrIndex LastUseIdx;
MachineOperand *LastUse =
lastRegisterUse(LR->start, li_->getPrevSlot(CopyIdx), li.reg, LastUseIdx);
if (LastUse) {
MachineInstr *LastUseMI = LastUse->getParent();
if (!isSameOrFallThroughBB(LastUseMI->getParent(), CopyMBB, tii_)) {
@ -590,7 +596,7 @@ SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(unsigned CopyIdx,
// of last use.
LastUse->setIsKill();
removeRange(li, li_->getDefIndex(LastUseIdx), LR->end, li_, tri_);
li.addKill(LR->valno, LastUseIdx+1, false);
LR->valno->addKill(li_->getNextSlot(LastUseIdx));
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
if (tii_->isMoveInstr(*LastUseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
DstReg == li.reg) {
@ -603,7 +609,7 @@ SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(unsigned CopyIdx,

// Is it livein?
if (LR->start <= MBBStart && LR->end > MBBStart) {
if (LR->start == 0) {
if (LR->start == MachineInstrIndex()) {
assert(TargetRegisterInfo::isPhysicalRegister(li.reg));
// Live-in to the function but dead. Remove it from entry live-in set.
mf_->begin()->removeLiveIn(li.reg);
@ -620,7 +626,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
unsigned DstReg,
unsigned DstSubIdx,
MachineInstr *CopyMI) {
unsigned CopyIdx = li_->getUseIndex(li_->getInstructionIndex(CopyMI));
MachineInstrIndex CopyIdx = li_->getUseIndex(li_->getInstructionIndex(CopyMI));
LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx);
assert(SrcLR != SrcInt.end() && "Live range not found!");
VNInfo *ValNo = SrcLR->valno;
@ -654,7 +660,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
return false;
}

unsigned DefIdx = li_->getDefIndex(CopyIdx);
MachineInstrIndex DefIdx = li_->getDefIndex(CopyIdx);
const LiveRange *DLR= li_->getInterval(DstReg).getLiveRangeContaining(DefIdx);
DLR->valno->setCopy(0);
// Don't forget to update sub-register intervals.
@ -687,7 +693,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
// should mark it dead:
if (DefMI->getParent() == MBB) {
DefMI->addRegisterDead(SrcInt.reg, tri_);
SrcLR->end = SrcLR->start + 1;
SrcLR->end = li_->getNextSlot(SrcLR->start);
}
}

@ -726,12 +732,12 @@ bool SimpleRegisterCoalescing::isBackEdgeCopy(MachineInstr *CopyMI,
return false;

LiveInterval &LI = li_->getInterval(DstReg);
unsigned DefIdx = li_->getInstructionIndex(CopyMI);
MachineInstrIndex DefIdx = li_->getInstructionIndex(CopyMI);
LiveInterval::const_iterator DstLR =
LI.FindLiveRangeContaining(li_->getDefIndex(DefIdx));
if (DstLR == LI.end())
return false;
if (DstLR->valno->kills.size() == 1 && DstLR->valno->kills[0].isPHIKill)
if (DstLR->valno->kills.size() == 1 && DstLR->valno->kills[0].isPHIIndex())
return true;
return false;
}
@ -807,7 +813,8 @@ SimpleRegisterCoalescing::UpdateRegDefsUses(unsigned SrcReg, unsigned DstReg,
(TargetRegisterInfo::isVirtualRegister(CopyDstReg) ||
allocatableRegs_[CopyDstReg])) {
LiveInterval &LI = li_->getInterval(CopyDstReg);
unsigned DefIdx = li_->getDefIndex(li_->getInstructionIndex(UseMI));
MachineInstrIndex DefIdx =
li_->getDefIndex(li_->getInstructionIndex(UseMI));
if (const LiveRange *DLR = LI.getLiveRangeContaining(DefIdx)) {
if (DLR->valno->def == DefIdx)
DLR->valno->setCopy(UseMI);
@ -826,10 +833,11 @@ void SimpleRegisterCoalescing::RemoveUnnecessaryKills(unsigned Reg,
if (!UseMO.isKill())
continue;
MachineInstr *UseMI = UseMO.getParent();
unsigned UseIdx = li_->getUseIndex(li_->getInstructionIndex(UseMI));
MachineInstrIndex UseIdx =
li_->getUseIndex(li_->getInstructionIndex(UseMI));
const LiveRange *LR = LI.getLiveRangeContaining(UseIdx);
if (!LR || !LI.isKill(LR->valno, UseIdx+1)) {
if (LR->valno->def != UseIdx+1) {
if (!LR || !LR->valno->isKill(li_->getNextSlot(UseIdx))) {
if (LR->valno->def != li_->getNextSlot(UseIdx)) {
// Interesting problem. After coalescing reg1027's def and kill are both
// at the same point: %reg1027,0.000000e+00 = [56,814:0) 0@70-(814)
//
@ -871,16 +879,16 @@ static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *li_,
/// Return true if live interval is removed.
bool SimpleRegisterCoalescing::ShortenDeadCopyLiveRange(LiveInterval &li,
MachineInstr *CopyMI) {
unsigned CopyIdx = li_->getInstructionIndex(CopyMI);
MachineInstrIndex CopyIdx = li_->getInstructionIndex(CopyMI);
LiveInterval::iterator MLR =
li.FindLiveRangeContaining(li_->getDefIndex(CopyIdx));
if (MLR == li.end())
return false; // Already removed by ShortenDeadCopySrcLiveRange.
unsigned RemoveStart = MLR->start;
unsigned RemoveEnd = MLR->end;
unsigned DefIdx = li_->getDefIndex(CopyIdx);
MachineInstrIndex RemoveStart = MLR->start;
MachineInstrIndex RemoveEnd = MLR->end;
MachineInstrIndex DefIdx = li_->getDefIndex(CopyIdx);
// Remove the liverange that's defined by this.
if (RemoveStart == DefIdx && RemoveEnd == DefIdx+1) {
if (RemoveStart == DefIdx && RemoveEnd == li_->getNextSlot(DefIdx)) {
removeRange(li, RemoveStart, RemoveEnd, li_, tri_);
return removeIntervalIfEmpty(li, li_, tri_);
}
@ -891,7 +899,7 @@ bool SimpleRegisterCoalescing::ShortenDeadCopyLiveRange(LiveInterval &li,
/// the val# it defines. If the live interval becomes empty, remove it as well.
bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li,
MachineInstr *DefMI) {
unsigned DefIdx = li_->getDefIndex(li_->getInstructionIndex(DefMI));
MachineInstrIndex DefIdx = li_->getDefIndex(li_->getInstructionIndex(DefMI));
LiveInterval::iterator MLR = li.FindLiveRangeContaining(DefIdx);
if (DefIdx != MLR->valno->def)
return false;
@ -902,7 +910,7 @@ bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li,
/// PropagateDeadness - Propagate the dead marker to the instruction which
/// defines the val#.
static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
unsigned &LRStart, LiveIntervals *li_,
MachineInstrIndex &LRStart, LiveIntervals *li_,
const TargetRegisterInfo* tri_) {
MachineInstr *DefMI =
li_->getInstructionFromIndex(li_->getDefIndex(LRStart));
@ -913,7 +921,7 @@ static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
else
DefMI->addOperand(MachineOperand::CreateReg(li.reg,
true, true, false, true));
++LRStart;
LRStart = li_->getNextSlot(LRStart);
}
}

@ -924,8 +932,8 @@ static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
bool
SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
MachineInstr *CopyMI) {
unsigned CopyIdx = li_->getInstructionIndex(CopyMI);
if (CopyIdx == 0) {
MachineInstrIndex CopyIdx = li_->getInstructionIndex(CopyMI);
if (CopyIdx == MachineInstrIndex()) {
// FIXME: special case: function live in. It can be a general case if the
// first instruction index starts at > 0 value.
assert(TargetRegisterInfo::isPhysicalRegister(li.reg));
@ -937,13 +945,14 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
return removeIntervalIfEmpty(li, li_, tri_);
}

LiveInterval::iterator LR = li.FindLiveRangeContaining(CopyIdx-1);
LiveInterval::iterator LR =
li.FindLiveRangeContaining(li_->getPrevSlot(CopyIdx));
if (LR == li.end())
// Livein but defined by a phi.
return false;

unsigned RemoveStart = LR->start;
unsigned RemoveEnd = li_->getDefIndex(CopyIdx)+1;
MachineInstrIndex RemoveStart = LR->start;
MachineInstrIndex RemoveEnd = li_->getNextSlot(li_->getDefIndex(CopyIdx));
if (LR->end > RemoveEnd)
// More uses past this copy? Nothing to do.
return false;
@ -963,7 +972,7 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
// If the live range starts in another mbb and the copy mbb is not a fall
// through mbb, then we can only cut the range from the beginning of the
// copy mbb.
RemoveStart = li_->getMBBStartIdx(CopyMBB) + 1;
RemoveStart = li_->getNextSlot(li_->getMBBStartIdx(CopyMBB));

if (LR->valno->def == RemoveStart) {
// If the def MI defines the val# and this copy is the only kill of the
@ -971,8 +980,8 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
PropagateDeadness(li, CopyMI, RemoveStart, li_, tri_);
++numDeadValNo;

if (li.isKill(LR->valno, RemoveEnd))
li.removeKill(LR->valno, RemoveEnd);
if (LR->valno->isKill(RemoveEnd))
LR->valno->removeKill(RemoveEnd);
}

removeRange(li, RemoveStart, RemoveEnd, li_, tri_);
@ -1019,13 +1028,14 @@ SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,

// If the virtual register live interval extends into a loop, turn down
// aggressiveness.
unsigned CopyIdx = li_->getDefIndex(li_->getInstructionIndex(CopyMI));
MachineInstrIndex CopyIdx =
li_->getDefIndex(li_->getInstructionIndex(CopyMI));
const MachineLoop *L = loopInfo->getLoopFor(CopyMBB);
if (!L) {
// Let's see if the virtual register live interval extends into the loop.
LiveInterval::iterator DLR = DstInt.FindLiveRangeContaining(CopyIdx);
assert(DLR != DstInt.end() && "Live range not found!");
DLR = DstInt.FindLiveRangeContaining(DLR->end+1);
DLR = DstInt.FindLiveRangeContaining(li_->getNextSlot(DLR->end));
if (DLR != DstInt.end()) {
CopyMBB = li_->getMBBFromIndex(DLR->start);
L = loopInfo->getLoopFor(CopyMBB);
@ -1035,7 +1045,7 @@ SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
if (!L || Length <= Threshold)
return true;

unsigned UseIdx = li_->getUseIndex(CopyIdx);
MachineInstrIndex UseIdx = li_->getUseIndex(CopyIdx);
LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
if (loopInfo->getLoopFor(SMBB) != L) {
@ -1048,7 +1058,7 @@ SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
if (SuccMBB == CopyMBB)
continue;
if (DstInt.overlaps(li_->getMBBStartIdx(SuccMBB),
li_->getMBBEndIdx(SuccMBB)+1))
li_->getNextSlot(li_->getMBBEndIdx(SuccMBB))))
return false;
}
}
@ -1079,11 +1089,12 @@ SimpleRegisterCoalescing::isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,

// If the virtual register live interval is defined or cross a loop, turn
// down aggressiveness.
unsigned CopyIdx = li_->getDefIndex(li_->getInstructionIndex(CopyMI));
unsigned UseIdx = li_->getUseIndex(CopyIdx);
MachineInstrIndex CopyIdx =
li_->getDefIndex(li_->getInstructionIndex(CopyMI));
MachineInstrIndex UseIdx = li_->getUseIndex(CopyIdx);
LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
assert(SLR != SrcInt.end() && "Live range not found!");
SLR = SrcInt.FindLiveRangeContaining(SLR->start-1);
SLR = SrcInt.FindLiveRangeContaining(li_->getPrevSlot(SLR->start));
if (SLR == SrcInt.end())
return true;
MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
@ -1103,7 +1114,7 @@ SimpleRegisterCoalescing::isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
if (PredMBB == SMBB)
continue;
if (SrcInt.overlaps(li_->getMBBStartIdx(PredMBB),
li_->getMBBEndIdx(PredMBB)+1))
li_->getNextSlot(li_->getMBBEndIdx(PredMBB))))
return false;
}
}
@ -1729,7 +1740,8 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
e = ResSrcInt->vni_end(); i != e; ++i) {
const VNInfo *vni = *i;
// FIXME: Do isPHIDef and isDefAccurate both need to be tested?
if (!vni->def || vni->isUnused() || vni->isPHIDef() || !vni->isDefAccurate())
if (vni->def == MachineInstrIndex() || vni->isUnused() || vni->isPHIDef() ||
!vni->isDefAccurate())
continue;
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
unsigned NewSrcReg, NewDstReg, NewSrcSubIdx, NewDstSubIdx;
@ -2141,7 +2153,8 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
}
} else {
// It was defined as a copy from the LHS, find out what value # it is.
RHSValNoInfo = LHS.getLiveRangeContaining(RHSValNoInfo0->def-1)->valno;
RHSValNoInfo =
LHS.getLiveRangeContaining(li_->getPrevSlot(RHSValNoInfo0->def))->valno;
RHSValID = RHSValNoInfo->id;
RHSVal0DefinedFromLHS = RHSValID;
}
@ -2204,7 +2217,8 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
continue;

// Figure out the value # from the RHS.
LHSValsDefinedFromRHS[VNI]=RHS.getLiveRangeContaining(VNI->def-1)->valno;
LHSValsDefinedFromRHS[VNI]=
RHS.getLiveRangeContaining(li_->getPrevSlot(VNI->def))->valno;
}

// Loop over the value numbers of the RHS, seeing if any are defined from
@ -2221,7 +2235,8 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
continue;

// Figure out the value # from the LHS.
RHSValsDefinedFromLHS[VNI]=LHS.getLiveRangeContaining(VNI->def-1)->valno;
RHSValsDefinedFromLHS[VNI]=
LHS.getLiveRangeContaining(li_->getPrevSlot(VNI->def))->valno;
}

LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
@ -2305,7 +2320,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
E = LHSValsDefinedFromRHS.end(); I != E; ++I) {
VNInfo *VNI = I->first;
unsigned LHSValID = LHSValNoAssignments[VNI->id];
LiveInterval::removeKill(NewVNInfo[LHSValID], VNI->def);
NewVNInfo[LHSValID]->removeKill(VNI->def);
if (VNI->hasPHIKill())
NewVNInfo[LHSValID]->setHasPHIKill(true);
RHS.addKills(NewVNInfo[LHSValID], VNI->kills);
@ -2316,7 +2331,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
E = RHSValsDefinedFromLHS.end(); I != E; ++I) {
VNInfo *VNI = I->first;
unsigned RHSValID = RHSValNoAssignments[VNI->id];
LiveInterval::removeKill(NewVNInfo[RHSValID], VNI->def);
NewVNInfo[RHSValID]->removeKill(VNI->def);
if (VNI->hasPHIKill())
NewVNInfo[RHSValID]->setHasPHIKill(true);
LHS.addKills(NewVNInfo[RHSValID], VNI->kills);
@ -2541,9 +2556,11 @@ SimpleRegisterCoalescing::differingRegisterClasses(unsigned RegA,
/// lastRegisterUse - Returns the last use of the specific register between
/// cycles Start and End or NULL if there are no uses.
MachineOperand *
SimpleRegisterCoalescing::lastRegisterUse(unsigned Start, unsigned End,
unsigned Reg, unsigned &UseIdx) const{
UseIdx = 0;
SimpleRegisterCoalescing::lastRegisterUse(MachineInstrIndex Start,
MachineInstrIndex End,
unsigned Reg,
MachineInstrIndex &UseIdx) const{
UseIdx = MachineInstrIndex();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
MachineOperand *LastUse = NULL;
for (MachineRegisterInfo::use_iterator I = mri_->use_begin(Reg),
@ -2555,7 +2572,7 @@ SimpleRegisterCoalescing::lastRegisterUse(unsigned Start, unsigned End,
SrcReg == DstReg)
// Ignore identity copies.
continue;
unsigned Idx = li_->getInstructionIndex(UseMI);
MachineInstrIndex Idx = li_->getInstructionIndex(UseMI);
if (Idx >= Start && Idx < End && Idx >= UseIdx) {
LastUse = &Use;
UseIdx = li_->getUseIndex(Idx);
@ -2564,13 +2581,13 @@ SimpleRegisterCoalescing::lastRegisterUse(unsigned Start, unsigned End,
return LastUse;
}

int e = (End-1) / InstrSlots::NUM * InstrSlots::NUM;
int s = Start;
MachineInstrIndex s = Start;
MachineInstrIndex e = li_->getBaseIndex(li_->getPrevSlot(End));
while (e >= s) {
// Skip deleted instructions
MachineInstr *MI = li_->getInstructionFromIndex(e);
while ((e - InstrSlots::NUM) >= s && !MI) {
e -= InstrSlots::NUM;
while (e != MachineInstrIndex() && li_->getPrevIndex(e) >= s && !MI) {
e = li_->getPrevIndex(e);
MI = li_->getInstructionFromIndex(e);
}
if (e < s || MI == NULL)
@ -2589,7 +2606,7 @@ SimpleRegisterCoalescing::lastRegisterUse(unsigned Start, unsigned End,
}
}

e -= InstrSlots::NUM;
e = li_->getPrevIndex(e);
}

return NULL;
@ -2609,10 +2626,10 @@ void SimpleRegisterCoalescing::releaseMemory() {
ReMatDefs.clear();
}

static bool isZeroLengthInterval(LiveInterval *li) {
bool SimpleRegisterCoalescing::isZeroLengthInterval(LiveInterval *li) const {
for (LiveInterval::Ranges::const_iterator
i = li->ranges.begin(), e = li->ranges.end(); i != e; ++i)
if (i->end - i->start > LiveInterval::InstrSlots::NUM)
if (li_->getPrevIndex(i->end) > i->start)
return false;
return true;
}

@ -196,7 +196,7 @@ namespace llvm {
/// TrimLiveIntervalToLastUse - If there is a last use in the same basic
/// block as the copy instruction, trim the live interval to the last use
/// and return true.
bool TrimLiveIntervalToLastUse(unsigned CopyIdx,
bool TrimLiveIntervalToLastUse(MachineInstrIndex CopyIdx,
MachineBasicBlock *CopyMBB,
LiveInterval &li, const LiveRange *LR);

@ -289,10 +289,13 @@ namespace llvm {

/// lastRegisterUse - Returns the last use of the specific register between
/// cycles Start and End or NULL if there are no uses.
MachineOperand *lastRegisterUse(unsigned Start, unsigned End, unsigned Reg,
unsigned &LastUseIdx) const;
MachineOperand *lastRegisterUse(MachineInstrIndex Start, MachineInstrIndex End,
unsigned Reg, MachineInstrIndex &LastUseIdx) const;

void printRegName(unsigned reg) const;

/// Returns true if the given live interval is zero length.
bool isZeroLengthInterval(LiveInterval *li) const;
};

} // End llvm namespace

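A hedged sketch (ToyVNInfo and ToyIndex are invented stand-ins, not the LLVM classes touched by this patch) of the kill-list change visible in the coalescer hunks above, where LiveInterval-level calls such as li.addKill(LR->valno, Idx, false) become member calls like LR->valno->addKill(Idx):

// Illustrative sketch only: kill indices stored directly on the value-number
// object, with addKill/isKill/removeKill as members.
#include <algorithm>
#include <iostream>
#include <vector>

typedef unsigned ToyIndex; // stand-in for an opaque instruction index

struct ToyVNInfo {
  std::vector<ToyIndex> kills;

  void addKill(ToyIndex k) { kills.push_back(k); }

  bool isKill(ToyIndex k) const {
    return std::find(kills.begin(), kills.end(), k) != kills.end();
  }

  void removeKill(ToyIndex k) {
    kills.erase(std::remove(kills.begin(), kills.end(), k), kills.end());
  }
};

int main() {
  ToyVNInfo vn;
  vn.addKill(12);
  std::cout << vn.isKill(12) << "\n"; // 1
  vn.removeKill(12);
  std::cout << vn.isKill(12) << "\n"; // 0
  return 0;
}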
@ -51,13 +51,13 @@ protected:

/// Ensures there is space before the given machine instruction, returns the
/// instruction's new number.
unsigned makeSpaceBefore(MachineInstr *mi) {
MachineInstrIndex makeSpaceBefore(MachineInstr *mi) {
if (!lis->hasGapBeforeInstr(lis->getInstructionIndex(mi))) {
lis->scaleNumbering(2);
ls->scaleNumbering(2);
}

unsigned miIdx = lis->getInstructionIndex(mi);
MachineInstrIndex miIdx = lis->getInstructionIndex(mi);

assert(lis->hasGapBeforeInstr(miIdx));

@ -66,13 +66,13 @@ protected:

/// Ensure there is space after the given machine instruction, returns the
/// instruction's new number.
unsigned makeSpaceAfter(MachineInstr *mi) {
MachineInstrIndex makeSpaceAfter(MachineInstr *mi) {
if (!lis->hasGapAfterInstr(lis->getInstructionIndex(mi))) {
lis->scaleNumbering(2);
ls->scaleNumbering(2);
}

unsigned miIdx = lis->getInstructionIndex(mi);
MachineInstrIndex miIdx = lis->getInstructionIndex(mi);

assert(lis->hasGapAfterInstr(miIdx));

@ -83,19 +83,19 @@ protected:
/// after the given instruction. Returns the base index of the inserted
/// instruction. The caller is responsible for adding an appropriate
/// LiveInterval to the LiveIntervals analysis.
unsigned insertStoreAfter(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
MachineInstrIndex insertStoreAfter(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {

MachineBasicBlock::iterator nextInstItr(next(mi));

unsigned miIdx = makeSpaceAfter(mi);
MachineInstrIndex miIdx = makeSpaceAfter(mi);

tii->storeRegToStackSlot(*mi->getParent(), nextInstItr, vreg,
true, ss, trc);
MachineBasicBlock::iterator storeInstItr(next(mi));
MachineInstr *storeInst = &*storeInstItr;
unsigned storeInstIdx = miIdx + LiveInterval::InstrSlots::NUM;
MachineInstrIndex storeInstIdx = lis->getNextIndex(miIdx);

assert(lis->getInstructionFromIndex(storeInstIdx) == 0 &&
"Store inst index already in use.");
@ -108,15 +108,15 @@ protected:
/// Insert a store of the given vreg to the given stack slot immediately
/// before the given instruction. Returns the base index of the inserted
/// Instruction.
unsigned insertStoreBefore(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
unsigned miIdx = makeSpaceBefore(mi);
MachineInstrIndex insertStoreBefore(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
MachineInstrIndex miIdx = makeSpaceBefore(mi);

tii->storeRegToStackSlot(*mi->getParent(), mi, vreg, true, ss, trc);
MachineBasicBlock::iterator storeInstItr(prior(mi));
MachineInstr *storeInst = &*storeInstItr;
unsigned storeInstIdx = miIdx - LiveInterval::InstrSlots::NUM;
MachineInstrIndex storeInstIdx = lis->getPrevIndex(miIdx);

assert(lis->getInstructionFromIndex(storeInstIdx) == 0 &&
"Store inst index already in use.");
@ -131,13 +131,13 @@ protected:
unsigned vreg,
const TargetRegisterClass *trc) {

unsigned storeInstIdx = insertStoreAfter(mi, ss, vreg, trc);
unsigned start = lis->getDefIndex(lis->getInstructionIndex(mi)),
end = lis->getUseIndex(storeInstIdx);
MachineInstrIndex storeInstIdx = insertStoreAfter(mi, ss, vreg, trc);
MachineInstrIndex start = lis->getDefIndex(lis->getInstructionIndex(mi)),
end = lis->getUseIndex(storeInstIdx);

VNInfo *vni =
li->getNextValue(storeInstIdx, 0, true, lis->getVNInfoAllocator());
li->addKill(vni, storeInstIdx, false);
vni->addKill(storeInstIdx);
DEBUG(errs() << " Inserting store range: [" << start
<< ", " << end << ")\n");
LiveRange lr(start, end, vni);
@ -149,18 +149,18 @@ protected:
/// after the given instruction. Returns the base index of the inserted
/// instruction. The caller is responsible for adding/removing an appropriate
/// range vreg's LiveInterval.
unsigned insertLoadAfter(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
MachineInstrIndex insertLoadAfter(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {

MachineBasicBlock::iterator nextInstItr(next(mi));

unsigned miIdx = makeSpaceAfter(mi);
MachineInstrIndex miIdx = makeSpaceAfter(mi);

tii->loadRegFromStackSlot(*mi->getParent(), nextInstItr, vreg, ss, trc);
MachineBasicBlock::iterator loadInstItr(next(mi));
MachineInstr *loadInst = &*loadInstItr;
unsigned loadInstIdx = miIdx + LiveInterval::InstrSlots::NUM;
MachineInstrIndex loadInstIdx = lis->getNextIndex(miIdx);

assert(lis->getInstructionFromIndex(loadInstIdx) == 0 &&
"Store inst index already in use.");
@ -174,15 +174,15 @@ protected:
/// before the given instruction. Returns the base index of the inserted
/// instruction. The caller is responsible for adding an appropriate
/// LiveInterval to the LiveIntervals analysis.
unsigned insertLoadBefore(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
unsigned miIdx = makeSpaceBefore(mi);
MachineInstrIndex insertLoadBefore(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
MachineInstrIndex miIdx = makeSpaceBefore(mi);

tii->loadRegFromStackSlot(*mi->getParent(), mi, vreg, ss, trc);
MachineBasicBlock::iterator loadInstItr(prior(mi));
MachineInstr *loadInst = &*loadInstItr;
unsigned loadInstIdx = miIdx - LiveInterval::InstrSlots::NUM;
MachineInstrIndex loadInstIdx = lis->getPrevIndex(miIdx);

assert(lis->getInstructionFromIndex(loadInstIdx) == 0 &&
"Load inst index already in use.");
@ -197,13 +197,13 @@ protected:
unsigned vreg,
const TargetRegisterClass *trc) {

unsigned loadInstIdx = insertLoadBefore(mi, ss, vreg, trc);
unsigned start = lis->getDefIndex(loadInstIdx),
end = lis->getUseIndex(lis->getInstructionIndex(mi));
MachineInstrIndex loadInstIdx = insertLoadBefore(mi, ss, vreg, trc);
MachineInstrIndex start = lis->getDefIndex(loadInstIdx),
end = lis->getUseIndex(lis->getInstructionIndex(mi));

VNInfo *vni =
li->getNextValue(loadInstIdx, 0, true, lis->getVNInfoAllocator());
li->addKill(vni, lis->getInstructionIndex(mi), false);
vni->addKill(lis->getInstructionIndex(mi));
DEBUG(errs() << " Inserting load range: [" << start
<< ", " << end << ")\n");
LiveRange lr(start, end, vni);
@ -321,23 +321,21 @@ public:
vrm->assignVirt2StackSlot(li->reg, ss);

MachineInstr *mi = 0;
unsigned storeIdx = 0;
MachineInstrIndex storeIdx = MachineInstrIndex();

if (valno->isDefAccurate()) {
// If we have an accurate def we can just grab an iterator to the instr
// after the def.
mi = lis->getInstructionFromIndex(valno->def);
storeIdx = insertStoreAfter(mi, ss, li->reg, trc) +
LiveInterval::InstrSlots::DEF;
storeIdx = lis->getDefIndex(insertStoreAfter(mi, ss, li->reg, trc));
} else {
// if we get here we have a PHI def.
mi = &lis->getMBBFromIndex(valno->def)->front();
storeIdx = insertStoreBefore(mi, ss, li->reg, trc) +
LiveInterval::InstrSlots::DEF;
storeIdx = lis->getDefIndex(insertStoreBefore(mi, ss, li->reg, trc));
}

MachineBasicBlock *defBlock = mi->getParent();
unsigned loadIdx = 0;
MachineInstrIndex loadIdx = MachineInstrIndex();

// Now we need to find the load...
MachineBasicBlock::iterator useItr(mi);
@ -345,13 +343,11 @@ public:

if (useItr != defBlock->end()) {
MachineInstr *loadInst = useItr;
loadIdx = insertLoadBefore(loadInst, ss, li->reg, trc) +
LiveInterval::InstrSlots::USE;
loadIdx = lis->getUseIndex(insertLoadBefore(loadInst, ss, li->reg, trc));
}
else {
MachineInstr *loadInst = &defBlock->back();
loadIdx = insertLoadAfter(loadInst, ss, li->reg, trc) +
LiveInterval::InstrSlots::USE;
loadIdx = lis->getUseIndex(insertLoadAfter(loadInst, ss, li->reg, trc));
}

li->removeRange(storeIdx, loadIdx, true);

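One more hedged sketch (ToyIndex is an invented stand-in) of the sentinel change used throughout the hunks above, where comparisons against the literal 0, e.g. if (CopyIdx == 0), become comparisons against a default-constructed index, e.g. if (CopyIdx == MachineInstrIndex()):

// Illustrative sketch only: a default-constructed index object acts as the
// "unset" sentinel instead of the literal 0.
#include <iostream>

class ToyIndex {
  unsigned index;
public:
  ToyIndex() : index(0) {}                // default = reserved/unset slot
  explicit ToyIndex(unsigned i) : index(i) {}
  bool operator==(ToyIndex o) const { return index == o.index; }
  bool operator!=(ToyIndex o) const { return index != o.index; }
};

int main() {
  ToyIndex copyIdx;                       // not assigned yet
  if (copyIdx == ToyIndex())
    std::cout << "index still unset\n";
  copyIdx = ToyIndex(8);
  if (copyIdx != ToyIndex())
    std::cout << "index now assigned\n";
  return 0;
}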
@ -295,7 +295,7 @@ StrongPHIElimination::computeDomForest(
|
||||
static bool isLiveIn(unsigned r, MachineBasicBlock* MBB,
|
||||
LiveIntervals& LI) {
|
||||
LiveInterval& I = LI.getOrCreateInterval(r);
|
||||
unsigned idx = LI.getMBBStartIdx(MBB);
|
||||
MachineInstrIndex idx = LI.getMBBStartIdx(MBB);
|
||||
return I.liveAt(idx);
|
||||
}
|
||||
|
||||
@ -428,7 +428,7 @@ void StrongPHIElimination::processBlock(MachineBasicBlock* MBB) {
    }

    LiveInterval& PI = LI.getOrCreateInterval(DestReg);
    unsigned pIdx = LI.getDefIndex(LI.getInstructionIndex(P));
    MachineInstrIndex pIdx = LI.getDefIndex(LI.getInstructionIndex(P));
    VNInfo* PVN = PI.getLiveRangeContaining(pIdx)->valno;
    PhiValueNumber.insert(std::make_pair(DestReg, PVN->id));
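As a compact restatement of what processBlock does there: the value number a PHI defines is found by looking up the live range that contains the PHI's def slot. A standalone helper sketch (not part of the patch; names are illustrative):

    // Look up the value number defined by a PHI (or any defining instruction).
    static VNInfo *valueNumberAtDef(LiveIntervals &LI, LiveInterval &Interval,
                                    MachineInstr *DefMI) {
      MachineInstrIndex DefIdx = LI.getDefIndex(LI.getInstructionIndex(DefMI));
      return Interval.getLiveRangeContaining(DefIdx)->valno;
    }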
@ -748,7 +748,7 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,

      LiveInterval& I = LI.getInterval(curr.second);
      MachineBasicBlock::iterator term = MBB->getFirstTerminator();
      unsigned endIdx = 0;
      MachineInstrIndex endIdx = MachineInstrIndex();
      if (term != MBB->end())
        endIdx = LI.getInstructionIndex(term);
      else
@ -784,10 +784,10 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
         InsertedPHIDests.begin(), E = InsertedPHIDests.end(); I != E; ++I) {
    if (RegHandled.insert(I->first).second) {
      LiveInterval& Int = LI.getOrCreateInterval(I->first);
      unsigned instrIdx = LI.getInstructionIndex(I->second);
      MachineInstrIndex instrIdx = LI.getInstructionIndex(I->second);
      if (Int.liveAt(LiveIntervals::getDefIndex(instrIdx)))
        Int.removeRange(LiveIntervals::getDefIndex(instrIdx),
                        LI.getMBBEndIdx(I->second->getParent())+1,
                        LI.getNextSlot(LI.getMBBEndIdx(I->second->getParent())),
                        true);

      LiveRange R = LI.addLiveRangeToEndOfBlock(I->first, I->second);
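Note the second idiom introduced here: a half-open endpoint that used to be written as index + 1 is now spelled getNextSlot(index), so the +1 never escapes the wrapper. A sketch of the resulting call shape, with illustrative names and the same header assumptions as earlier:

    // Remove a register's range from an instruction's def slot to the end of
    // its block; the endpoint is one slot past the block-end index.
    static void clearToBlockEnd(LiveIntervals &LI, LiveInterval &Int,
                                MachineInstr *MI) {
      MachineInstrIndex DefIdx =
        LiveIntervals::getDefIndex(LI.getInstructionIndex(MI));
      if (Int.liveAt(DefIdx))
        Int.removeRange(DefIdx,
                        LI.getNextSlot(LI.getMBBEndIdx(MI->getParent())), // was ...+1
                        true);
    }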
@ -831,11 +831,11 @@ void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
      VNInfo* FirstVN = *Int.vni_begin();
      FirstVN->setHasPHIKill(false);
      if (I->getOperand(i).isKill())
        Int.addKill(FirstVN,
                    LiveIntervals::getUseIndex(LI.getInstructionIndex(I)), false);
        FirstVN->addKill(
                    LiveIntervals::getUseIndex(LI.getInstructionIndex(I)));

      LiveRange LR (LI.getMBBStartIdx(I->getParent()),
                    LiveIntervals::getUseIndex(LI.getInstructionIndex(I))+1,
                    LI.getNextSlot(LI.getUseIndex(LI.getInstructionIndex(I))),
                    FirstVN);

      Int.addRange(LR);
@ -870,8 +870,8 @@ bool StrongPHIElimination::mergeLiveIntervals(unsigned primary,
  for (LiveInterval::iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
    LiveRange R = *I;

    unsigned Start = R.start;
    unsigned End = R.end;
    MachineInstrIndex Start = R.start;
    MachineInstrIndex End = R.end;
    if (LHS.getLiveRangeContaining(Start))
      return false;
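LiveRange::start and LiveRange::end are themselves MachineInstrIndex values after this change, so code that walks an interval keeps working as long as it only copies, compares, or passes indexes around. A sketch of a query loop in that style (illustrative helper, not part of the patch):

    // Mirror of the test above: report whether the start of any RHS range
    // already lies inside a range of LHS.
    static bool startsInsideOther(const LiveInterval &LHS,
                                  const LiveInterval &RHS) {
      for (LiveInterval::const_iterator I = RHS.begin(), E = RHS.end();
           I != E; ++I) {
        MachineInstrIndex Start = I->start;
        if (LHS.getLiveRangeContaining(Start))
          return true;
      }
      return false;
    }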
@ -968,11 +968,11 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
        LI.computeNumbering();

        LiveInterval& Int = LI.getOrCreateInterval(I->first);
        unsigned instrIdx =
        MachineInstrIndex instrIdx =
          LI.getInstructionIndex(--SI->second->getFirstTerminator());
        if (Int.liveAt(LiveIntervals::getDefIndex(instrIdx)))
          Int.removeRange(LiveIntervals::getDefIndex(instrIdx),
                          LI.getMBBEndIdx(SI->second)+1, true);
                          LI.getNextSlot(LI.getMBBEndIdx(SI->second)), true);

        LiveRange R = LI.addLiveRangeToEndOfBlock(I->first,
                                                  --SI->second->getFirstTerminator());
@ -1012,7 +1012,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
        if (PI.containsOneValue()) {
          LI.removeInterval(DestReg);
        } else {
          unsigned idx = LI.getDefIndex(LI.getInstructionIndex(PInstr));
          MachineInstrIndex idx = LI.getDefIndex(LI.getInstructionIndex(PInstr));
          PI.removeRange(*PI.getLiveRangeContaining(idx), true);
        }
      } else {
@ -1026,8 +1026,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
          LiveInterval& InputI = LI.getInterval(reg);
          if (MBB != PInstr->getParent() &&
              InputI.liveAt(LI.getMBBStartIdx(PInstr->getParent())) &&
              InputI.expiredAt(LI.getInstructionIndex(PInstr) +
                               LiveInterval::InstrSlots::NUM))
              InputI.expiredAt(LI.getNextIndex(LI.getInstructionIndex(PInstr))))
            InputI.removeRange(LI.getMBBStartIdx(PInstr->getParent()),
                               LI.getInstructionIndex(PInstr),
                               true);
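This hunk highlights the difference between the two stepping helpers used throughout the commit: getNextIndex advances by a whole instruction (the old + LiveInterval::InstrSlots::NUM), while getNextSlot advances by a single slot (the old + 1). A sketch of the distinction, assuming the helpers behave as the arithmetic they replace:

    // getNextIndex(idx): same slot of the next instruction
    //   (previously idx + LiveInterval::InstrSlots::NUM).
    // getNextSlot(idx):  the immediately following slot
    //   (previously idx + 1).
    static void steppingHelpers(LiveIntervals &LI, MachineInstrIndex idx) {
      MachineInstrIndex nextInstr = LI.getNextIndex(idx);
      MachineInstrIndex nextSlot  = LI.getNextSlot(idx);
      (void)nextInstr; (void)nextSlot;
    }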
@ -1035,7 +1034,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {

        // If the PHI is not dead, then the valno defined by the PHI
        // now has an unknown def.
        unsigned idx = LI.getDefIndex(LI.getInstructionIndex(PInstr));
        MachineInstrIndex idx = LI.getDefIndex(LI.getInstructionIndex(PInstr));
        const LiveRange* PLR = PI.getLiveRangeContaining(idx);
        PLR->valno->setIsPHIDef(true);
        LiveRange R (LI.getMBBStartIdx(PInstr->getParent()),
@ -18,6 +18,7 @@
#define LLVM_CODEGEN_VIRTREGMAP_H

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
@ -79,7 +80,7 @@ namespace llvm {

    /// Virt2SplitKillMap - This maps a split virtual register to its last
    /// use (kill) index.
    IndexedMap<unsigned> Virt2SplitKillMap;
    IndexedMap<MachineInstrIndex> Virt2SplitKillMap;

    /// ReMatMap - This is virtual register to re-materialized instruction
    /// mapping. Each virtual register whose definition is going to be
@ -141,7 +142,7 @@ namespace llvm {
    VirtRegMap() : MachineFunctionPass(&ID), Virt2PhysMap(NO_PHYS_REG),
                   Virt2StackSlotMap(NO_STACK_SLOT),
                   Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
                   Virt2SplitKillMap(0), ReMatMap(NULL),
                   Virt2SplitKillMap(MachineInstrIndex()), ReMatMap(NULL),
                   ReMatId(MAX_STACK_SLOT+1),
                   LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) { }
    virtual bool runOnMachineFunction(MachineFunction &MF);
@ -265,17 +266,17 @@ namespace llvm {
    }

    /// @brief record the last use (kill) of a split virtual register.
    void addKillPoint(unsigned virtReg, unsigned index) {
    void addKillPoint(unsigned virtReg, MachineInstrIndex index) {
      Virt2SplitKillMap[virtReg] = index;
    }

    unsigned getKillPoint(unsigned virtReg) const {
    MachineInstrIndex getKillPoint(unsigned virtReg) const {
      return Virt2SplitKillMap[virtReg];
    }

    /// @brief remove the last use (kill) of a split virtual register.
    void removeKillPoint(unsigned virtReg) {
      Virt2SplitKillMap[virtReg] = 0;
      Virt2SplitKillMap[virtReg] = MachineInstrIndex();
    }

    /// @brief returns true if the specified MachineInstr is a spill point.
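The VirtRegMap changes show the last recurring pattern: sentinel values. Where a raw 0 used to mean "no kill point recorded", a default-constructed MachineInstrIndex now plays that role. A usage sketch against the interface above; vrm, vreg, and killIdx are illustrative names, not part of the patch:

    // Record, query, and clear the kill point of a split virtual register.
    static void trackKillPoint(VirtRegMap &vrm, unsigned vreg,
                               MachineInstrIndex killIdx) {
      vrm.addKillPoint(vreg, killIdx);

      // A default-constructed index is the "not set" sentinel that replaces 0.
      if (vrm.getKillPoint(vreg) != MachineInstrIndex())
        vrm.removeKillPoint(vreg);
    }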