//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass.  Given some numbering of
// each the machine instructions (in this implemention depth-first order) an
// interval [i, j) is said to be a live interval for register v if there is no
// instruction with number j' > j such that v is live at j' and there is no
// instruction with number i' < i such that v is live at i'. In this
// implementation intervals can have holes, i.e. an interval might look like
// [1,20), [50,65), [1000,1001).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include <cmath>
#include <iterator>

namespace llvm {

  class AliasAnalysis;
  class LiveVariables;
  class MachineLoopInfo;
  class TargetRegisterInfo;
  class MachineRegisterInfo;
  class TargetInstrInfo;
  class TargetRegisterClass;
  class VirtRegMap;

  class LiveIntervals : public MachineFunctionPass {
    MachineFunction* mf_;
    MachineRegisterInfo* mri_;
    const TargetMachine* tm_;
    const TargetRegisterInfo* tri_;
    const TargetInstrInfo* tii_;
    AliasAnalysis *aa_;
    LiveVariables* lv_;
    SlotIndexes* indexes_;

    /// Special pool allocator for VNInfo's (LiveInterval val#).
    ///
    VNInfo::Allocator VNInfoAllocator;

    typedef DenseMap<unsigned, LiveInterval*> Reg2IntervalMap;
    Reg2IntervalMap r2iMap_;

    /// allocatableRegs_ - A bit vector of allocatable registers.
    BitVector allocatableRegs_;

    /// CloneMIs - A list of clones as result of re-materialization.
    std::vector<MachineInstr*> CloneMIs;

  public:
    static char ID; // Pass identification, replacement for typeid
    LiveIntervals() : MachineFunctionPass(ID) {}

    // Calculate the spill weight to assign to a single instruction.
    static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth);

    // After summing the spill weights of all defs and uses, the final weight
    // should be normalized, dividing the weight of the interval by its size.
    // This encourages spilling of intervals that are large and have few uses,
    // and discourages spilling of small intervals with many uses.
    void normalizeSpillWeight(LiveInterval &li) {
      li.weight /= getApproximateInstructionCount(li) + 25;
    }

    typedef Reg2IntervalMap::iterator iterator;
    typedef Reg2IntervalMap::const_iterator const_iterator;
    const_iterator begin() const { return r2iMap_.begin(); }
    const_iterator end() const { return r2iMap_.end(); }
    iterator begin() { return r2iMap_.begin(); }
    iterator end() { return r2iMap_.end(); }
    unsigned getNumIntervals() const { return (unsigned)r2iMap_.size(); }

    LiveInterval &getInterval(unsigned reg) {
      Reg2IntervalMap::iterator I = r2iMap_.find(reg);
      assert(I != r2iMap_.end() && "Interval does not exist for register");
      return *I->second;
    }

    const LiveInterval &getInterval(unsigned reg) const {
      Reg2IntervalMap::const_iterator I = r2iMap_.find(reg);
      assert(I != r2iMap_.end() && "Interval does not exist for register");
      return *I->second;
    }

    bool hasInterval(unsigned reg) const {
      return r2iMap_.count(reg);
    }

    /// isAllocatable - is the physical register reg allocatable in the current
    /// function?
    bool isAllocatable(unsigned reg) const {
      return allocatableRegs_.test(reg);
    }

    /// getScaledIntervalSize - get the size of an interval in "units,"
    /// where every function is composed of one thousand units.  This
    /// measure scales properly with empty index slots in the function.
    double getScaledIntervalSize(LiveInterval& I) {
      return (1000.0 * I.getSize()) / indexes_->getIndexesLength();
    }

    /// getFuncInstructionCount - Return the number of instructions in the
    /// current function.
    unsigned getFuncInstructionCount() {
      return indexes_->getFunctionSize();
    }

    /// getApproximateInstructionCount - computes an estimate of the number
    /// of instructions in a given LiveInterval.
    unsigned getApproximateInstructionCount(LiveInterval& I) {
      double IntervalPercentage = getScaledIntervalSize(I) / 1000.0;
      return (unsigned)(IntervalPercentage * indexes_->getFunctionSize());
    }

    /// conflictsWithPhysReg - Returns true if the specified register is used or
    /// defined during the duration of the specified interval. Copies to and
    /// from li.reg are allowed. This method is only able to analyze simple
    /// ranges that stay within a single basic block. Anything else is
    /// considered a conflict.
    bool conflictsWithPhysReg(const LiveInterval &li, VirtRegMap &vrm,
                              unsigned reg);

    /// conflictsWithAliasRef - Similar to conflictsWithPhysRegRef except
    /// it checks for alias uses and defs.
    bool conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                               SmallPtrSet<MachineInstr*,32> &JoinedCopies);

    // Interval creation
    LiveInterval &getOrCreateInterval(unsigned reg) {
      Reg2IntervalMap::iterator I = r2iMap_.find(reg);
      if (I == r2iMap_.end())
        I = r2iMap_.insert(std::make_pair(reg, createInterval(reg))).first;
      return *I->second;
    }

    /// dupInterval - Duplicate a live interval. The caller is responsible for
    /// managing the allocated memory.
    LiveInterval *dupInterval(LiveInterval *li);

    /// addLiveRangeToEndOfBlock - Given a register and an instruction,
    /// adds a live range from that instruction to the end of its MBB.
    LiveRange addLiveRangeToEndOfBlock(unsigned reg,
                                       MachineInstr* startInst);

    // Interval removal
    void removeInterval(unsigned Reg) {
      DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.find(Reg);
      delete I->second;
      r2iMap_.erase(I);
    }

    SlotIndex getZeroIndex() const {
      return indexes_->getZeroIndex();
    }

    SlotIndex getInvalidIndex() const {
      return indexes_->getInvalidIndex();
    }

    /// isNotInMIMap - returns true if the specified machine instr has been
    /// removed or was never entered in the map.
    bool isNotInMIMap(const MachineInstr* Instr) const {
      return !indexes_->hasIndex(Instr);
    }

    /// Returns the base index of the given instruction.
    SlotIndex getInstructionIndex(const MachineInstr *instr) const {
      return indexes_->getInstructionIndex(instr);
    }

    /// Returns the instruction associated with the given index.
    MachineInstr* getInstructionFromIndex(SlotIndex index) const {
      return indexes_->getInstructionFromIndex(index);
    }

    /// Return the first index in the given basic block.
    SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
      return indexes_->getMBBStartIdx(mbb);
    }

    /// Return the last index in the given basic block.
    SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
      return indexes_->getMBBEndIdx(mbb);
    }

    bool isLiveInToMBB(const LiveInterval &li,
                       const MachineBasicBlock *mbb) const {
      return li.liveAt(getMBBStartIdx(mbb));
    }

    LiveRange* findEnteringRange(LiveInterval &li,
                                 const MachineBasicBlock *mbb) {
      return li.getLiveRangeContaining(getMBBStartIdx(mbb));
    }

    bool isLiveOutOfMBB(const LiveInterval &li,
                        const MachineBasicBlock *mbb) const {
      return li.liveAt(getMBBEndIdx(mbb).getPrevSlot());
    }

    LiveRange* findExitingRange(LiveInterval &li,
                                const MachineBasicBlock *mbb) {
      return li.getLiveRangeContaining(getMBBEndIdx(mbb).getPrevSlot());
    }

    MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
      return indexes_->getMBBFromIndex(index);
    }

    SlotIndex getMBBTerminatorGap(const MachineBasicBlock *mbb) {
      return indexes_->getTerminatorGap(mbb);
    }

    SlotIndex InsertMachineInstrInMaps(MachineInstr *MI) {
      return indexes_->insertMachineInstrInMaps(MI);
    }

    void RemoveMachineInstrFromMaps(MachineInstr *MI) {
      indexes_->removeMachineInstrFromMaps(MI);
    }

    void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
      indexes_->replaceMachineInstrInMaps(MI, NewMI);
    }

    void InsertMBBInMaps(MachineBasicBlock *MBB) {
      indexes_->insertMBBInMaps(MBB);
    }

    bool findLiveInMBBs(SlotIndex Start, SlotIndex End,
                        SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
      return indexes_->findLiveInMBBs(Start, End, MBBs);
    }

    void renumber() {
      indexes_->renumberIndexes();
    }

    VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
    virtual void releaseMemory();

    /// runOnMachineFunction - pass entry point
    virtual bool runOnMachineFunction(MachineFunction&);

    /// print - Implement the dump method.
    virtual void print(raw_ostream &O, const Module* = 0) const;

    /// addIntervalsForSpills - Create new intervals for spilled defs / uses of
    /// the given interval. FIXME: It also returns the weight of the spill slot
    /// (if any is created) by reference. This is temporary.
    std::vector<LiveInterval*>
    addIntervalsForSpills(const LiveInterval& i,
                          SmallVectorImpl<LiveInterval*> &SpillIs,
                          const MachineLoopInfo *loopInfo, VirtRegMap& vrm);

    /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
    /// around all defs and uses of the specified interval. Return true if it
    /// was able to cut its interval.
    bool spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                       unsigned PhysReg, VirtRegMap &vrm);

    /// isReMaterializable - Returns true if every definition of MI of every
    /// val# of the specified interval is re-materializable. Also returns true
    /// by reference if all of the defs are load instructions.
    bool isReMaterializable(const LiveInterval &li,
                            SmallVectorImpl<LiveInterval*> &SpillIs,
                            bool &isLoad);

    /// isReMaterializable - Returns true if the definition MI of the specified
    /// val# of the specified interval is re-materializable.
    bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
                            MachineInstr *MI);

    /// getRepresentativeReg - Find the largest super register of the specified
    /// physical register.
    unsigned getRepresentativeReg(unsigned Reg) const;

    /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
    /// specified interval that conflicts with the specified physical register.
    unsigned getNumConflictsWithPhysReg(const LiveInterval &li,
                                        unsigned PhysReg) const;

    /// intervalIsInOneMBB - Returns true if the specified interval is entirely
    /// within a single basic block.
    bool intervalIsInOneMBB(const LiveInterval &li) const;

  private:
    /// computeIntervals - Compute live intervals.
    void computeIntervals();

    /// handleRegisterDef - update intervals for a register def
    /// (calls handlePhysicalRegisterDef and
    /// handleVirtualRegisterDef)
    void handleRegisterDef(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator MI,
                           SlotIndex MIIdx,
                           MachineOperand& MO, unsigned MOIdx);

    /// isPartialRedef - Return true if the specified def at the specific index
    /// is partially re-defining the specified live interval. A common case of
    /// this is a definition of the sub-register.
    bool isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                        LiveInterval &interval);

    /// handleVirtualRegisterDef - update intervals for a virtual
    /// register def
    void handleVirtualRegisterDef(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MI,
                                  SlotIndex MIIdx, MachineOperand& MO,
                                  unsigned MOIdx,
                                  LiveInterval& interval);

    /// handlePhysicalRegisterDef - update intervals for a physical register
    /// def.
    void handlePhysicalRegisterDef(MachineBasicBlock* mbb,
                                   MachineBasicBlock::iterator mi,
                                   SlotIndex MIIdx, MachineOperand& MO,
                                   LiveInterval &interval,
                                   MachineInstr *CopyMI);

    /// handleLiveInRegister - Create interval for a livein register.
    void handleLiveInRegister(MachineBasicBlock* mbb,
                              SlotIndex MIIdx,
                              LiveInterval &interval, bool isAlias = false);

    /// getReMatImplicitUse - If the remat definition MI has one (for now, we
    /// only allow one) virtual register operand, then its uses are implicitly
    /// using the register. Returns the virtual register.
    unsigned getReMatImplicitUse(const LiveInterval &li,
                                 MachineInstr *MI) const;

    /// isValNoAvailableAt - Return true if the val# of the specified interval
    /// which reaches the given instruction also reaches the specified use
    /// index.
    bool isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                            SlotIndex UseIdx) const;

    /// isReMaterializable - Returns true if the definition MI of the specified
    /// val# of the specified interval is re-materializable. Also returns true
    /// by reference if the def is a load.
    bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
                            MachineInstr *MI,
                            SmallVectorImpl<LiveInterval*> &SpillIs,
                            bool &isLoad);

    /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
    /// slot / to reg or any rematerialized load into ith operand of specified
    /// MI. If it is successul, MI is updated with the newly created MI and
    /// returns true.
    bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
                              MachineInstr *DefMI, SlotIndex InstrIdx,
                              SmallVector<unsigned, 2> &Ops,
                              bool isSS, int FrameIndex, unsigned Reg);

    /// canFoldMemoryOperand - Return true if the specified load / store
    /// folding is possible.
    bool canFoldMemoryOperand(MachineInstr *MI,
                              SmallVector<unsigned, 2> &Ops,
                              bool ReMatLoadSS) const;

    /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified
    /// VNInfo that's after the specified index but is within the basic block.
    bool anyKillInMBBAfterIdx(const LiveInterval &li, const VNInfo *VNI,
                              MachineBasicBlock *MBB,
                              SlotIndex Idx) const;

    /// hasAllocatableSuperReg - Return true if the specified physical register
    /// has any super register that's allocatable.
    bool hasAllocatableSuperReg(unsigned Reg) const;

    /// SRInfo - Spill / restore info.
    struct SRInfo {
      SlotIndex index;
      unsigned vreg;
      bool canFold;
      SRInfo(SlotIndex i, unsigned vr, bool f)
        : index(i), vreg(vr), canFold(f) {}
    };

    bool alsoFoldARestore(int Id, SlotIndex index, unsigned vr,
                          BitVector &RestoreMBBs,
                          DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes);
    void eraseRestoreInfo(int Id, SlotIndex index, unsigned vr,
                          BitVector &RestoreMBBs,
                          DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes);

    /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
    /// spilled and create empty intervals for their uses.
    void handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                              const TargetRegisterClass* rc,
                              std::vector<LiveInterval*> &NewLIs);

    /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
    /// interval on to-be re-materialized operands of MI) with new register.
    void rewriteImplicitOps(const LiveInterval &li,
                            MachineInstr *MI, unsigned NewVReg, VirtRegMap &vrm);

    /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper
    /// functions for addIntervalsForSpills to rewrite uses / defs for the given
    /// live range.
    bool rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
        bool TrySplit, SlotIndex index, SlotIndex end,
        MachineInstr *MI, MachineInstr *OrigDefMI, MachineInstr *DefMI,
        unsigned Slot, int LdSlot,
        bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
        VirtRegMap &vrm, const TargetRegisterClass* rc,
        SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
        unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
        DenseMap<unsigned,unsigned> &MBBVRegsMap,
        std::vector<LiveInterval*> &NewLIs);
    void rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
        LiveInterval::Ranges::const_iterator &I,
        MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
        bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
        VirtRegMap &vrm, const TargetRegisterClass* rc,
        SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
        BitVector &SpillMBBs,
        DenseMap<unsigned,std::vector<SRInfo> > &SpillIdxes,
        BitVector &RestoreMBBs,
        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes,
        DenseMap<unsigned,unsigned> &MBBVRegsMap,
        std::vector<LiveInterval*> &NewLIs);

    // Normalize the spill weight of all the intervals in NewLIs.
    void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs);

    static LiveInterval* createInterval(unsigned Reg);

    void printInstrs(raw_ostream &O) const;
    void dumpInstrs() const;
  };
} // End llvm namespace

#endif