diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h new file mode 100644 index 00000000000..b2f0416312a --- /dev/null +++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h @@ -0,0 +1,211 @@ +//===-- llvm/CodeGen/LiveInterval.h - Live Interval Analysis ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the LiveInterval analysis pass. Given some +// numbering of each the machine instructions (in this implemention +// depth-first order) an interval [i, j] is said to be a live interval +// for register v if there is no instruction with number j' > j such +// that v is live at j' abd there is no instruction with number i' < i +// such that v is live at i'. In this implementation intervals can +// have holes, i.e. an interval might look like [1,20], [50,65], +// [1000,1001] +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_LIVEINTERVALS_H +#define LLVM_CODEGEN_LIVEINTERVALS_H + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include +#include + +namespace llvm { + + class LiveVariables; + class MRegisterInfo; + + class LiveIntervals : public MachineFunctionPass + { + public: + struct Interval { + typedef std::pair Range; + typedef std::vector Ranges; + unsigned reg; // the register of this interval + Ranges ranges; // the ranges this register is valid + + Interval(unsigned r) + : reg(r) { + + } + + unsigned start() const { + assert(!ranges.empty() && "empty interval for register"); + return ranges.front().first; + } + + unsigned end() const { + assert(!ranges.empty() && "empty interval for register"); + return ranges.back().second; + } + + bool expired(unsigned index) const { + return end() <= index; + } + + bool overlaps(unsigned index) const { + for (Ranges::const_iterator + i = ranges.begin(), e = ranges.end(); i != e; ++i) { + if (index >= i->first && index < i->second) { + return true; + } + } + return false; + } + + void addRange(unsigned start, unsigned end) { + Range range = std::make_pair(start, end); + Ranges::iterator it = + std::lower_bound(ranges.begin(), ranges.end(), range); + + if (it == ranges.end()) { + it = ranges.insert(it, range); + goto exit; + } + + assert(range.first <= it->first && "got wrong iterator?"); + // merge ranges if necesary + if (range.first < it->first) { + if (range.second >= it->first) { + it->first = range.first; + } + else { + it = ranges.insert(it, range); + assert(it != ranges.end() && "wtf?"); + goto exit; + } + } + + exit: + mergeRangesIfNecessary(it); + } + + private: + void mergeRangesIfNecessary(Ranges::iterator it) { + while (it != ranges.begin()) { + Ranges::iterator prev = it - 1; + if (prev->second < it->first) { + break; + } + prev->second = it->second; + ranges.erase(it); + it = prev; + } + } + }; + + struct StartPointComp { + bool operator()(const Interval& lhs, const Interval& rhs) { + return lhs.ranges.front().first < rhs.ranges.front().first; + } + }; + + struct EndPointComp { + bool operator()(const Interval& lhs, const Interval& rhs) { + return lhs.ranges.back().second < rhs.ranges.back().second; + } + }; + + typedef std::vector Intervals; + typedef std::vector MachineBasicBlockPtrs; + + private: + 
MachineFunction* mf_; + const TargetMachine* tm_; + const MRegisterInfo* mri_; + MachineBasicBlock* currentMbb_; + MachineBasicBlock::iterator currentInstr_; + LiveVariables* lv_; + + std::vector allocatableRegisters_; + + typedef std::map MbbIndex2MbbMap; + MbbIndex2MbbMap mbbi2mbbMap_; + + typedef std::map Mi2IndexMap; + Mi2IndexMap mi2iMap_; + + typedef std::map Reg2IntervalMap; + Reg2IntervalMap r2iMap_; + + Intervals intervals_; + + public: + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + Intervals& getIntervals() { return intervals_; } + MachineBasicBlockPtrs getOrderedMachineBasicBlockPtrs() const { + MachineBasicBlockPtrs result; + for (MbbIndex2MbbMap::const_iterator + it = mbbi2mbbMap_.begin(), itEnd = mbbi2mbbMap_.end(); + it != itEnd; ++it) { + result.push_back(it->second); + } + return result; + } + + private: + /// runOnMachineFunction - pass entry point + bool runOnMachineFunction(MachineFunction&); + + /// computeIntervals - compute live intervals + void computeIntervals(); + + + /// handleRegisterDef - update intervals for a register def + /// (calls handlePhysicalRegisterDef and + /// handleVirtualRegisterDef) + void handleRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg); + + /// handleVirtualRegisterDef - update intervals for a virtual + /// register def + void handleVirtualRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg); + + /// handlePhysicalRegisterDef - update intervals for a + /// physical register def + void handlePhysicalRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg); + + unsigned getInstructionIndex(MachineInstr* instr) const; + + void printRegName(unsigned reg) const; + }; + + inline bool operator==(const LiveIntervals::Interval& lhs, + const LiveIntervals::Interval& rhs) { + return lhs.reg == rhs.reg; + } + + inline std::ostream& operator<<(std::ostream& os, + const LiveIntervals::Interval& li) { + os << "%reg" << li.reg << " = "; + for (LiveIntervals::Interval::Ranges::const_iterator + i = li.ranges.begin(), e = li.ranges.end(); i != e; ++i) { + os << "[" << i->first << ", " << i->second << "]"; + } + return os; + } + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/LiveIntervals.h b/include/llvm/CodeGen/LiveIntervals.h new file mode 100644 index 00000000000..b2f0416312a --- /dev/null +++ b/include/llvm/CodeGen/LiveIntervals.h @@ -0,0 +1,211 @@ +//===-- llvm/CodeGen/LiveInterval.h - Live Interval Analysis ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the LiveInterval analysis pass. Given some +// numbering of each the machine instructions (in this implemention +// depth-first order) an interval [i, j] is said to be a live interval +// for register v if there is no instruction with number j' > j such +// that v is live at j' abd there is no instruction with number i' < i +// such that v is live at i'. In this implementation intervals can +// have holes, i.e. 
an interval might look like [1,20], [50,65], +// [1000,1001] +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_LIVEINTERVALS_H +#define LLVM_CODEGEN_LIVEINTERVALS_H + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include +#include + +namespace llvm { + + class LiveVariables; + class MRegisterInfo; + + class LiveIntervals : public MachineFunctionPass + { + public: + struct Interval { + typedef std::pair Range; + typedef std::vector Ranges; + unsigned reg; // the register of this interval + Ranges ranges; // the ranges this register is valid + + Interval(unsigned r) + : reg(r) { + + } + + unsigned start() const { + assert(!ranges.empty() && "empty interval for register"); + return ranges.front().first; + } + + unsigned end() const { + assert(!ranges.empty() && "empty interval for register"); + return ranges.back().second; + } + + bool expired(unsigned index) const { + return end() <= index; + } + + bool overlaps(unsigned index) const { + for (Ranges::const_iterator + i = ranges.begin(), e = ranges.end(); i != e; ++i) { + if (index >= i->first && index < i->second) { + return true; + } + } + return false; + } + + void addRange(unsigned start, unsigned end) { + Range range = std::make_pair(start, end); + Ranges::iterator it = + std::lower_bound(ranges.begin(), ranges.end(), range); + + if (it == ranges.end()) { + it = ranges.insert(it, range); + goto exit; + } + + assert(range.first <= it->first && "got wrong iterator?"); + // merge ranges if necesary + if (range.first < it->first) { + if (range.second >= it->first) { + it->first = range.first; + } + else { + it = ranges.insert(it, range); + assert(it != ranges.end() && "wtf?"); + goto exit; + } + } + + exit: + mergeRangesIfNecessary(it); + } + + private: + void mergeRangesIfNecessary(Ranges::iterator it) { + while (it != ranges.begin()) { + Ranges::iterator prev = it - 1; + if (prev->second < it->first) { + break; + } + prev->second = it->second; + ranges.erase(it); + it = prev; + } + } + }; + + struct StartPointComp { + bool operator()(const Interval& lhs, const Interval& rhs) { + return lhs.ranges.front().first < rhs.ranges.front().first; + } + }; + + struct EndPointComp { + bool operator()(const Interval& lhs, const Interval& rhs) { + return lhs.ranges.back().second < rhs.ranges.back().second; + } + }; + + typedef std::vector Intervals; + typedef std::vector MachineBasicBlockPtrs; + + private: + MachineFunction* mf_; + const TargetMachine* tm_; + const MRegisterInfo* mri_; + MachineBasicBlock* currentMbb_; + MachineBasicBlock::iterator currentInstr_; + LiveVariables* lv_; + + std::vector allocatableRegisters_; + + typedef std::map MbbIndex2MbbMap; + MbbIndex2MbbMap mbbi2mbbMap_; + + typedef std::map Mi2IndexMap; + Mi2IndexMap mi2iMap_; + + typedef std::map Reg2IntervalMap; + Reg2IntervalMap r2iMap_; + + Intervals intervals_; + + public: + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + Intervals& getIntervals() { return intervals_; } + MachineBasicBlockPtrs getOrderedMachineBasicBlockPtrs() const { + MachineBasicBlockPtrs result; + for (MbbIndex2MbbMap::const_iterator + it = mbbi2mbbMap_.begin(), itEnd = mbbi2mbbMap_.end(); + it != itEnd; ++it) { + result.push_back(it->second); + } + return result; + } + + private: + /// runOnMachineFunction - pass entry point + bool runOnMachineFunction(MachineFunction&); + + /// computeIntervals - compute live intervals + void computeIntervals(); + + + /// handleRegisterDef - update 
intervals for a register def + /// (calls handlePhysicalRegisterDef and + /// handleVirtualRegisterDef) + void handleRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg); + + /// handleVirtualRegisterDef - update intervals for a virtual + /// register def + void handleVirtualRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg); + + /// handlePhysicalRegisterDef - update intervals for a + /// physical register def + void handlePhysicalRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg); + + unsigned getInstructionIndex(MachineInstr* instr) const; + + void printRegName(unsigned reg) const; + }; + + inline bool operator==(const LiveIntervals::Interval& lhs, + const LiveIntervals::Interval& rhs) { + return lhs.reg == rhs.reg; + } + + inline std::ostream& operator<<(std::ostream& os, + const LiveIntervals::Interval& li) { + os << "%reg" << li.reg << " = "; + for (LiveIntervals::Interval::Ranges::const_iterator + i = li.ranges.begin(), e = li.ranges.end(); i != e; ++i) { + os << "[" << i->first << ", " << i->second << "]"; + } + return os; + } + +} // End llvm namespace + +#endif diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h index 11fcf29013a..8868de13a4e 100644 --- a/include/llvm/CodeGen/Passes.h +++ b/include/llvm/CodeGen/Passes.h @@ -44,12 +44,17 @@ FunctionPass *createSimpleRegisterAllocator(); /// FunctionPass *createLocalRegisterAllocator(); +/// LinearScanRegisterAllocation Pass - This pass implements the +/// linear scan register allocation algorithm, a global register +/// allocator. +/// +FunctionPass *createLinearScanRegisterAllocator(); + /// PrologEpilogCodeInserter Pass - This pass inserts prolog and epilog code, /// and eliminates abstract frame references. /// FunctionPass *createPrologEpilogCodeInserter(); - /// getRegisterAllocator - This creates an instance of the register allocator /// for the Sparc. FunctionPass *getRegisterAllocator(TargetMachine &T); diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp new file mode 100644 index 00000000000..eda717baeb1 --- /dev/null +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -0,0 +1,302 @@ +//===-- LiveIntervals.cpp - Live Interval Analysis ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the LiveInterval analysis pass which is used +// by the Linear Scan Register allocator. This pass linearizes the +// basic blocks of the function in DFS order and uses the +// LiveVariables pass to conservatively compute live intervals for +// each virtual and physical register. 
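For readers new to the representation: LiveIntervals::Interval above keeps its ranges sorted by start point and coalesces ranges that touch or overlap, and overlaps() reads each range as effectively half-open, [start, end). The standalone sketch below is illustrative only (SimpleInterval and its names are not part of this patch); it shows the same insert-with-lower_bound-then-merge idea in isolation, which may make addRange() easier to follow.

    #include <algorithm>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Illustrative stand-in for the coalescing done by Interval::addRange:
    // keep ranges sorted by start point and merge any that touch or overlap.
    struct SimpleInterval {
      typedef std::pair<unsigned, unsigned> Range;   // [start, end)
      std::vector<Range> ranges;                     // sorted by start point

      void addRange(unsigned start, unsigned end) {
        Range r(start, end);
        // first existing range whose start point is not smaller than r's
        std::vector<Range>::iterator it =
            std::lower_bound(ranges.begin(), ranges.end(), r);
        it = ranges.insert(it, r);
        // merge with the previous range if it reaches into the new one
        if (it != ranges.begin() && (it - 1)->second >= it->first) {
          (it - 1)->second = std::max((it - 1)->second, it->second);
          it = ranges.erase(it) - 1;
        }
        // merge any following ranges the new one now reaches into
        while (it + 1 != ranges.end() && it->second >= (it + 1)->first) {
          it->second = std::max(it->second, (it + 1)->second);
          ranges.erase(it + 1);
        }
      }
    };

    int main() {
      SimpleInterval i;
      i.addRange(50, 65);
      i.addRange(1, 10);
      i.addRange(8, 20);        // overlaps [1, 10): coalesced into [1, 20)
      i.addRange(1000, 1001);
      for (unsigned k = 0; k != i.ranges.size(); ++k)
        std::cout << '[' << i.ranges[k].first << ',' << i.ranges[k].second << ") ";
      std::cout << '\n';        // prints: [1,20) [50,65) [1000,1001)
      return 0;
    }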
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "liveintervals" +#include "llvm/CodeGen/LiveIntervals.h" +#include "llvm/Function.h" +#include "llvm/CodeGen/LiveVariables.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/SSARegMap.h" +#include "llvm/Target/MRegisterInfo.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetRegInfo.h" +#include "llvm/Support/CFG.h" +#include "Support/Debug.h" +#include "Support/DepthFirstIterator.h" +#include "Support/Statistic.h" +#include + +using namespace llvm; + +namespace { + RegisterAnalysis X("liveintervals", + "Live Interval Analysis"); + + Statistic<> numIntervals("liveintervals", "Number of intervals"); +}; + +void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const +{ + AU.setPreservesAll(); + AU.addRequired(); + AU.addRequiredID(PHIEliminationID); + MachineFunctionPass::getAnalysisUsage(AU); +} + +/// runOnMachineFunction - Register allocate the whole function +/// +bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { + DEBUG(std::cerr << "Machine Function\n"); + mf_ = &fn; + tm_ = &fn.getTarget(); + mri_ = tm_->getRegisterInfo(); + lv_ = &getAnalysis(); + allocatableRegisters_.clear(); + mbbi2mbbMap_.clear(); + mi2iMap_.clear(); + r2iMap_.clear(); + r2iMap_.clear(); + intervals_.clear(); + + // mark allocatable registers + allocatableRegisters_.resize(MRegisterInfo::FirstVirtualRegister); + // Loop over all of the register classes... + for (MRegisterInfo::regclass_iterator + rci = mri_->regclass_begin(), rce = mri_->regclass_end(); + rci != rce; ++rci) { + // Loop over all of the allocatable registers in the function... + for (TargetRegisterClass::iterator + i = (*rci)->allocation_order_begin(*mf_), + e = (*rci)->allocation_order_end(*mf_); i != e; ++i) { + allocatableRegisters_[*i] = true; // The reg is allocatable! 
+ } + } + + // number MachineInstrs + unsigned miIndex = 0; + for (MachineFunction::iterator mbb = mf_->begin(), mbbEnd = mf_->end(); + mbb != mbbEnd; ++mbb) { + const std::pair& entry = + lv_->getMachineBasicBlockInfo(&*mbb); + bool inserted = mbbi2mbbMap_.insert(std::make_pair(entry.second, + entry.first)).second; + assert(inserted && "multiple index -> MachineBasicBlock"); + + for (MachineBasicBlock::iterator mi = mbb->begin(), miEnd = mbb->end(); + mi != miEnd; ++mi) { + inserted = mi2iMap_.insert(std::make_pair(*mi, miIndex)).second; + assert(inserted && "multiple MachineInstr -> index mappings"); + ++miIndex; + } + } + + computeIntervals(); + + return true; +} + +void LiveIntervals::printRegName(unsigned reg) const +{ + if (reg < MRegisterInfo::FirstVirtualRegister) + std::cerr << mri_->getName(reg); + else + std::cerr << '%' << reg; +} + +void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg) +{ + DEBUG(std::cerr << "\t\t\tregister: ";printRegName(reg); std::cerr << '\n'); + + unsigned instrIndex = getInstructionIndex(*mi); + + LiveVariables::VarInfo& vi = lv_->getVarInfo(reg); + + Reg2IntervalMap::iterator r2iit = r2iMap_.find(reg); + // handle multiple definition case (machine instructions violating + // ssa after phi-elimination + if (r2iit != r2iMap_.end()) { + unsigned ii = r2iit->second; + Interval& interval = intervals_[ii]; + unsigned end = getInstructionIndex(mbb->back()) + 1; + DEBUG(std::cerr << "\t\t\t\tadding range: [" + << instrIndex << ',' << end << "]\n"); + interval.addRange(instrIndex, end); + DEBUG(std::cerr << "\t\t\t\t" << interval << '\n'); + } + else { + // add new interval + intervals_.push_back(Interval(reg)); + Interval& interval = intervals_.back(); + // update interval index for this register + r2iMap_[reg] = intervals_.size() - 1; + + for (MbbIndex2MbbMap::iterator + it = mbbi2mbbMap_.begin(), itEnd = mbbi2mbbMap_.end(); + it != itEnd; ++it) { + unsigned liveBlockIndex = it->first; + MachineBasicBlock* liveBlock = it->second; + if (liveBlockIndex < vi.AliveBlocks.size() && + vi.AliveBlocks[liveBlockIndex]) { + unsigned start = getInstructionIndex(liveBlock->front()); + unsigned end = getInstructionIndex(liveBlock->back()) + 1; + DEBUG(std::cerr << "\t\t\t\tadding range: [" + << start << ',' << end << "]\n"); + interval.addRange(start, end); + } + } + + bool killedInDefiningBasicBlock = false; + for (int i = 0, e = vi.Kills.size(); i != e; ++i) { + MachineBasicBlock* killerBlock = vi.Kills[i].first; + MachineInstr* killerInstr = vi.Kills[i].second; + killedInDefiningBasicBlock |= mbb == killerBlock; + unsigned start = (mbb == killerBlock ? 
+ instrIndex : + getInstructionIndex(killerBlock->front())); + unsigned end = getInstructionIndex(killerInstr) + 1; + DEBUG(std::cerr << "\t\t\t\tadding range: [" + << start << ',' << end << "]\n"); + interval.addRange(start, end); + } + + if (!killedInDefiningBasicBlock) { + unsigned end = getInstructionIndex(mbb->back()) + 1; + interval.addRange(instrIndex, end); + } + + DEBUG(std::cerr << "\t\t\t\t" << interval << '\n'); + } +} + +void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg) +{ + DEBUG(std::cerr << "\t\t\tregister: ";printRegName(reg); std::cerr << '\n'); + + unsigned start = getInstructionIndex(*mi); + unsigned end = start; + + for (MachineBasicBlock::iterator e = mbb->end(); mi != e; ++mi) { + for (LiveVariables::killed_iterator + ki = lv_->dead_begin(*mi), + ke = lv_->dead_end(*mi); + ki != ke; ++ki) { + if (reg == ki->second) { + end = getInstructionIndex(ki->first) + 1; + goto exit; + } + } + + for (LiveVariables::killed_iterator + ki = lv_->killed_begin(*mi), + ke = lv_->killed_end(*mi); + ki != ke; ++ki) { + if (reg == ki->second) { + end = getInstructionIndex(ki->first) + 1; + goto exit; + } + } + } +exit: + assert(start < end && "did not find end of interval?"); + + Reg2IntervalMap::iterator r2iit = r2iMap_.find(reg); + if (r2iit != r2iMap_.end()) { + unsigned ii = r2iit->second; + Interval& interval = intervals_[ii]; + DEBUG(std::cerr << "\t\t\t\tadding range: [" + << start << ',' << end << "]\n"); + interval.addRange(start, end); + DEBUG(std::cerr << "\t\t\t\t" << interval << '\n'); + } + else { + intervals_.push_back(Interval(reg)); + Interval& interval = intervals_.back(); + // update interval index for this register + r2iMap_[reg] = intervals_.size() - 1; + DEBUG(std::cerr << "\t\t\t\tadding range: [" + << start << ',' << end << "]\n"); + interval.addRange(start, end); + DEBUG(std::cerr << "\t\t\t\t" << interval << '\n'); + } +} + +void LiveIntervals::handleRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg) +{ + if (reg < MRegisterInfo::FirstVirtualRegister) { + if (allocatableRegisters_[reg]) { + handlePhysicalRegisterDef(mbb, mi, reg); + } + } + else { + handleVirtualRegisterDef(mbb, mi, reg); + } +} + +unsigned LiveIntervals::getInstructionIndex(MachineInstr* instr) const +{ + assert(mi2iMap_.find(instr) != mi2iMap_.end() && + "instruction not assigned a number"); + return mi2iMap_.find(instr)->second; +} + +/// computeIntervals - computes the live intervals for virtual +/// registers. 
for some ordering of the machine instructions [1,N] a +/// live interval is an interval [i, j] where 1 <= i <= j <= N for +/// which a variable is live +void LiveIntervals::computeIntervals() +{ + DEBUG(std::cerr << "computing live intervals:\n"); + + for (MbbIndex2MbbMap::iterator + it = mbbi2mbbMap_.begin(), itEnd = mbbi2mbbMap_.end(); + it != itEnd; ++it) { + MachineBasicBlock* mbb = it->second; + DEBUG(std::cerr << "machine basic block: " + << mbb->getBasicBlock()->getName() << "\n"); + for (MachineBasicBlock::iterator mi = mbb->begin(), miEnd = mbb->end(); + mi != miEnd; ++mi) { + MachineInstr* instr = *mi; + const TargetInstrDescriptor& tid = + tm_->getInstrInfo().get(instr->getOpcode()); + DEBUG(std::cerr << "\t\tinstruction[" + << getInstructionIndex(instr) << "]: "; + instr->print(std::cerr, *tm_);); + + // handle implicit defs + for (const unsigned* id = tid.ImplicitDefs; *id; ++id) { + unsigned physReg = *id; + handlePhysicalRegisterDef(mbb, mi, physReg); + } + + // handle explicit defs + for (int i = instr->getNumOperands() - 1; i >= 0; --i) { + MachineOperand& mop = instr->getOperand(i); + + if (!mop.isVirtualRegister()) + continue; + + if (mop.opIsDefOnly() || mop.opIsDefAndUse()) { + unsigned reg = mop.getAllocatedRegNum(); + handleVirtualRegisterDef(mbb, mi, reg); + } + } + } + } + + DEBUG(std::copy(intervals_.begin(), intervals_.end(), + std::ostream_iterator(std::cerr, "\n"))); +} diff --git a/lib/CodeGen/LiveIntervalAnalysis.h b/lib/CodeGen/LiveIntervalAnalysis.h new file mode 100644 index 00000000000..b2f0416312a --- /dev/null +++ b/lib/CodeGen/LiveIntervalAnalysis.h @@ -0,0 +1,211 @@ +//===-- llvm/CodeGen/LiveInterval.h - Live Interval Analysis ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the LiveInterval analysis pass. Given some +// numbering of each the machine instructions (in this implemention +// depth-first order) an interval [i, j] is said to be a live interval +// for register v if there is no instruction with number j' > j such +// that v is live at j' abd there is no instruction with number i' < i +// such that v is live at i'. In this implementation intervals can +// have holes, i.e. 
an interval might look like [1,20], [50,65], +// [1000,1001] +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_LIVEINTERVALS_H +#define LLVM_CODEGEN_LIVEINTERVALS_H + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include +#include + +namespace llvm { + + class LiveVariables; + class MRegisterInfo; + + class LiveIntervals : public MachineFunctionPass + { + public: + struct Interval { + typedef std::pair Range; + typedef std::vector Ranges; + unsigned reg; // the register of this interval + Ranges ranges; // the ranges this register is valid + + Interval(unsigned r) + : reg(r) { + + } + + unsigned start() const { + assert(!ranges.empty() && "empty interval for register"); + return ranges.front().first; + } + + unsigned end() const { + assert(!ranges.empty() && "empty interval for register"); + return ranges.back().second; + } + + bool expired(unsigned index) const { + return end() <= index; + } + + bool overlaps(unsigned index) const { + for (Ranges::const_iterator + i = ranges.begin(), e = ranges.end(); i != e; ++i) { + if (index >= i->first && index < i->second) { + return true; + } + } + return false; + } + + void addRange(unsigned start, unsigned end) { + Range range = std::make_pair(start, end); + Ranges::iterator it = + std::lower_bound(ranges.begin(), ranges.end(), range); + + if (it == ranges.end()) { + it = ranges.insert(it, range); + goto exit; + } + + assert(range.first <= it->first && "got wrong iterator?"); + // merge ranges if necesary + if (range.first < it->first) { + if (range.second >= it->first) { + it->first = range.first; + } + else { + it = ranges.insert(it, range); + assert(it != ranges.end() && "wtf?"); + goto exit; + } + } + + exit: + mergeRangesIfNecessary(it); + } + + private: + void mergeRangesIfNecessary(Ranges::iterator it) { + while (it != ranges.begin()) { + Ranges::iterator prev = it - 1; + if (prev->second < it->first) { + break; + } + prev->second = it->second; + ranges.erase(it); + it = prev; + } + } + }; + + struct StartPointComp { + bool operator()(const Interval& lhs, const Interval& rhs) { + return lhs.ranges.front().first < rhs.ranges.front().first; + } + }; + + struct EndPointComp { + bool operator()(const Interval& lhs, const Interval& rhs) { + return lhs.ranges.back().second < rhs.ranges.back().second; + } + }; + + typedef std::vector Intervals; + typedef std::vector MachineBasicBlockPtrs; + + private: + MachineFunction* mf_; + const TargetMachine* tm_; + const MRegisterInfo* mri_; + MachineBasicBlock* currentMbb_; + MachineBasicBlock::iterator currentInstr_; + LiveVariables* lv_; + + std::vector allocatableRegisters_; + + typedef std::map MbbIndex2MbbMap; + MbbIndex2MbbMap mbbi2mbbMap_; + + typedef std::map Mi2IndexMap; + Mi2IndexMap mi2iMap_; + + typedef std::map Reg2IntervalMap; + Reg2IntervalMap r2iMap_; + + Intervals intervals_; + + public: + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + Intervals& getIntervals() { return intervals_; } + MachineBasicBlockPtrs getOrderedMachineBasicBlockPtrs() const { + MachineBasicBlockPtrs result; + for (MbbIndex2MbbMap::const_iterator + it = mbbi2mbbMap_.begin(), itEnd = mbbi2mbbMap_.end(); + it != itEnd; ++it) { + result.push_back(it->second); + } + return result; + } + + private: + /// runOnMachineFunction - pass entry point + bool runOnMachineFunction(MachineFunction&); + + /// computeIntervals - compute live intervals + void computeIntervals(); + + + /// handleRegisterDef - update 
intervals for a register def + /// (calls handlePhysicalRegisterDef and + /// handleVirtualRegisterDef) + void handleRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg); + + /// handleVirtualRegisterDef - update intervals for a virtual + /// register def + void handleVirtualRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg); + + /// handlePhysicalRegisterDef - update intervals for a + /// physical register def + void handlePhysicalRegisterDef(MachineBasicBlock* mbb, + MachineBasicBlock::iterator mi, + unsigned reg); + + unsigned getInstructionIndex(MachineInstr* instr) const; + + void printRegName(unsigned reg) const; + }; + + inline bool operator==(const LiveIntervals::Interval& lhs, + const LiveIntervals::Interval& rhs) { + return lhs.reg == rhs.reg; + } + + inline std::ostream& operator<<(std::ostream& os, + const LiveIntervals::Interval& li) { + os << "%reg" << li.reg << " = "; + for (LiveIntervals::Interval::Ranges::const_iterator + i = li.ranges.begin(), e = li.ranges.end(); i != e; ++i) { + os << "[" << i->first << ", " << i->second << "]"; + } + return os; + } + +} // End llvm namespace + +#endif diff --git a/lib/CodeGen/Passes.cpp b/lib/CodeGen/Passes.cpp index 239eacde4c1..58a5105d4b6 100644 --- a/lib/CodeGen/Passes.cpp +++ b/lib/CodeGen/Passes.cpp @@ -18,14 +18,15 @@ namespace llvm { namespace { - enum RegAllocName { simple, local }; + enum RegAllocName { simple, local, linearscan }; cl::opt RegAlloc("regalloc", cl::desc("Register allocator to use: (default = simple)"), cl::Prefix, - cl::values(clEnumVal(simple, " simple register allocator"), - clEnumVal(local, " local register allocator"), + cl::values(clEnumVal(simple, " simple register allocator"), + clEnumVal(local, " local register allocator"), + clEnumVal(linearscan, " linear-scan global register allocator"), 0), cl::init(local)); } @@ -37,6 +38,8 @@ FunctionPass *createRegisterAllocator() return createSimpleRegisterAllocator(); case local: return createLocalRegisterAllocator(); + case linearscan: + return createLinearScanRegisterAllocator(); default: assert(0 && "no register allocator selected"); return 0; // not reached diff --git a/lib/CodeGen/RegAllocLinearScan.cpp b/lib/CodeGen/RegAllocLinearScan.cpp new file mode 100644 index 00000000000..f78be0f1ca9 --- /dev/null +++ b/lib/CodeGen/RegAllocLinearScan.cpp @@ -0,0 +1,777 @@ +//===-- RegAllocLinearScan.cpp - Linear Scan register allocator -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements a linear scan register allocator. 
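For orientation before the implementation below: linear scan (in the style of Poletto and Sarkar) walks the intervals in order of increasing start point, expires active intervals that ended before the current start, and then either takes a free register or spills the interval with the furthest end point. The following is a minimal self-contained sketch of that loop, illustrative only; it assumes k interchangeable registers and ignores register classes, aliases, and the reserved temporaries the real pass has to handle.

    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct Interval { unsigned reg, start, end; };   // live range [start, end)

    // Minimal linear scan over 'k' identical registers; returns, per interval,
    // either an assigned register number or -1 for "spilled".
    static std::vector<int> linearScan(std::vector<Interval> ivs, unsigned k) {
      std::sort(ivs.begin(), ivs.end(),
                [](const Interval& a, const Interval& b) { return a.start < b.start; });
      std::vector<int> assignment(ivs.size(), -1);
      std::vector<unsigned> active;                  // indices of live intervals
      std::vector<int> freeRegs;
      for (unsigned r = 0; r != k; ++r) freeRegs.push_back(r);

      for (unsigned i = 0; i != ivs.size(); ++i) {
        // expire active intervals that ended before the current one starts
        for (unsigned a = 0; a != active.size();) {
          if (ivs[active[a]].end <= ivs[i].start) {
            freeRegs.push_back(assignment[active[a]]);
            active.erase(active.begin() + a);
          } else
            ++a;
        }
        if (!freeRegs.empty()) {                     // a register is free: take it
          assignment[i] = freeRegs.back();
          freeRegs.pop_back();
          active.push_back(i);
        } else {                                     // spill the interval ending last
          unsigned victim = active.front();
          for (unsigned a = 1; a != active.size(); ++a)
            if (ivs[active[a]].end > ivs[victim].end) victim = active[a];
          if (ivs[victim].end > ivs[i].end) {        // steal the victim's register
            assignment[i] = assignment[victim];
            assignment[victim] = -1;
            active.erase(std::find(active.begin(), active.end(), victim));
            active.push_back(i);
          }                                          // otherwise spill the current one
        }
      }
      return assignment;
    }

    int main() {
      std::vector<Interval> ivs = {{1, 0, 10}, {2, 2, 4}, {3, 3, 12}, {4, 5, 8}};
      std::vector<int> a = linearScan(ivs, 2);
      for (unsigned i = 0; i != a.size(); ++i) {
        std::cout << "%reg" << ivs[i].reg << " -> ";
        if (a[i] < 0) std::cout << "spill\n";
        else std::cout << "R" << a[i] << "\n";
      }
      return 0;
    }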
+// +//===----------------------------------------------------------------------===// +#define DEBUG_TYPE "regalloc" +#include "llvm/Function.h" +#include "llvm/CodeGen/LiveIntervals.h" +#include "llvm/CodeGen/LiveVariables.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/SSARegMap.h" +#include "llvm/Target/MRegisterInfo.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetRegInfo.h" +#include "llvm/Support/CFG.h" +#include "Support/Debug.h" +#include "Support/DepthFirstIterator.h" +#include "Support/Statistic.h" +#include "Support/STLExtras.h" +#include <iostream> + +using namespace llvm; + +namespace { + Statistic<> numSpilled ("ra-linearscan", "Number of registers spilled"); + Statistic<> numReloaded("ra-linearscan", "Number of registers reloaded"); + + class RA : public MachineFunctionPass { + public: + typedef std::vector<const LiveIntervals::Interval*> IntervalPtrs; + + private: + MachineFunction* mf_; + const TargetMachine* tm_; + const MRegisterInfo* mri_; + MachineBasicBlock* currentMbb_; + MachineBasicBlock::iterator currentInstr_; + typedef LiveIntervals::Intervals Intervals; + const Intervals* li_; + IntervalPtrs active_, inactive_; + + typedef std::vector<unsigned> Regs; + Regs tempUseOperands_; + Regs tempDefOperands_; + + Regs reserved_; + + typedef LiveIntervals::MachineBasicBlockPtrs MachineBasicBlockPtrs; + MachineBasicBlockPtrs mbbs_; + + typedef std::vector<unsigned> Phys2VirtMap; + Phys2VirtMap p2vMap_; + + typedef std::map<unsigned, unsigned> Virt2PhysMap; + Virt2PhysMap v2pMap_; + + typedef std::map<unsigned, int> Virt2StackSlotMap; + Virt2StackSlotMap v2ssMap_; + + int instrAdded_; + + public: + virtual const char* getPassName() const { + return "Linear Scan Register Allocator"; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<LiveVariables>(); + AU.addRequired<LiveIntervals>(); + MachineFunctionPass::getAnalysisUsage(AU); + } + + private: + /// runOnMachineFunction - register allocate the whole function + bool runOnMachineFunction(MachineFunction&); + + /// processActiveIntervals - expire old intervals and move + /// non-overlapping ones to the inactive list + void processActiveIntervals(Intervals::const_iterator cur); + + /// processInactiveIntervals - expire old intervals and move + /// overlapping ones to the active list + void processInactiveIntervals(Intervals::const_iterator cur); + + /// assignStackSlotAtInterval - choose an interval to spill and + ///
assign it a stack slot. Currently we spill the interval with the + /// latest end point among the active and inactive lists and the + /// current interval + void assignStackSlotAtInterval(Intervals::const_iterator cur); + + /// + /// register handling helpers + /// + + /// reservePhysReg - reserves a physical register and spills + /// any value assigned to it, if any + void reservePhysReg(unsigned reg); + + /// clearReservedPhysReg - marks a physical register as free for + /// use + void clearReservedPhysReg(unsigned reg); + + /// physRegAvailable - returns true if the specified physical + /// register is available + bool physRegAvailable(unsigned physReg); + + /// getFreePhysReg - return a free physical register for this + /// virtual register if we have one, otherwise return 0 + unsigned getFreePhysReg(unsigned virtReg); + + + /// tempPhysRegAvailable - returns true if the specified + /// temporary physical register is available + bool tempPhysRegAvailable(unsigned physReg); + + /// getFreeTempPhysReg - return a free temporary physical + /// register for this register class if we have one (should + /// never return 0) + unsigned getFreeTempPhysReg(const TargetRegisterClass* rc); + + /// getFreeTempPhysReg - return a free temporary physical + /// register for this virtual register if we have one (should + /// never return 0) + unsigned getFreeTempPhysReg(unsigned virtReg) { + const TargetRegisterClass* rc = + mf_->getSSARegMap()->getRegClass(virtReg); + return getFreeTempPhysReg(rc); + } + + /// assignVirt2PhysReg - assigns the given free physical register + /// to the given virtual register + void assignVirt2PhysReg(unsigned virtReg, unsigned physReg); + + /// clearVirtReg - free the physical register associated with this + /// virtual register and disassociate the virtual->physical and + /// physical->virtual mappings + void clearVirtReg(unsigned virtReg); + + /// assignVirt2StackSlot - assigns this virtual register to a + /// stack slot + void assignVirt2StackSlot(unsigned virtReg); + + /// findOrCreateStackSlot - returns the offset of the + /// specified register on the stack, allocating space if + /// necessary + int findOrCreateStackSlot(unsigned virtReg); + + /// spillVirtReg - spills the virtual register + void spillVirtReg(unsigned virtReg); + + /// loadVirt2PhysReg - loads the value of the specified virtual + /// register into the given physical register. 
The virtual register must + /// have an assigned stack slot + void loadVirt2PhysReg(unsigned virtReg, unsigned physReg); + + void printVirt2PhysMap() const { + std::cerr << "allocated registers:\n"; + for (Virt2PhysMap::const_iterator + i = v2pMap_.begin(), e = v2pMap_.end(); i != e; ++i) { + std::cerr << '[' << i->first << ',' + << mri_->getName(i->second) << "]\n"; + } + std::cerr << '\n'; + } + void printIntervals(const char* const str, + RA::IntervalPtrs::const_iterator i, + RA::IntervalPtrs::const_iterator e) const { + if (str) std::cerr << str << " intervals:\n"; + for (; i != e; ++i) { + std::cerr << "\t\t" << **i << " -> "; + if ((*i)->reg < MRegisterInfo::FirstVirtualRegister) { + std::cerr << mri_->getName((*i)->reg); + } + else { + std::cerr << mri_->getName(v2pMap_.find((*i)->reg)->second); + } + std::cerr << '\n'; + } + } + }; +} + +bool RA::runOnMachineFunction(MachineFunction &fn) { + mf_ = &fn; + tm_ = &fn.getTarget(); + mri_ = tm_->getRegisterInfo(); + li_ = &getAnalysis<LiveIntervals>().getIntervals(); + active_.clear(); + inactive_.clear(); + mbbs_ = getAnalysis<LiveIntervals>().getOrderedMachineBasicBlockPtrs(); + p2vMap_.resize(MRegisterInfo::FirstVirtualRegister-1); + p2vMap_.clear(); + v2pMap_.clear(); + v2ssMap_.clear(); + + // FIXME: this will work only for the X86 backend. I need to + // devise an algorithm to select the minimal (considering register + // aliasing) number of temp registers to reserve so that we have 2 + // registers for each register class available. + + // reserve R32: EDI, EBX, + // R16: DI, BX, + // R8: DH, BH, + // RFP: FP5, FP6 + reserved_.push_back(19); /* EDI */ + reserved_.push_back(17); /* EBX */ + reserved_.push_back(12); /* DI */ + reserved_.push_back( 7); /* BX */ + reserved_.push_back(11); /* DH */ + reserved_.push_back( 4); /* BH */ + reserved_.push_back(28); /* FP5 */ + reserved_.push_back(29); /* FP6 */ + + // linear scan algorithm + for (Intervals::const_iterator + i = li_->begin(), e = li_->end(); i != e; ++i) { + DEBUG(std::cerr << "processing current interval: " << *i << '\n'); + + DEBUG(printIntervals("\tactive", active_.begin(), active_.end())); + DEBUG(printIntervals("\tinactive", inactive_.begin(), inactive_.end())); + + processActiveIntervals(i); + // processInactiveIntervals(i); + + // if this register is preallocated, look for an interval that + // overlaps with it and assign it to a memory location + if (i->reg < MRegisterInfo::FirstVirtualRegister) { + for (IntervalPtrs::iterator + ai = active_.begin(), ae = active_.end(); ai != ae; ++ai) { + unsigned virtReg = (*ai)->reg; + Virt2PhysMap::const_iterator it = v2pMap_.find(virtReg); + if (it != v2pMap_.end() && it->second == i->reg) { + active_.erase(ai); + clearVirtReg(virtReg); + break; + } + } + reservePhysReg(i->reg); + active_.push_back(&*i); + } + // otherwise we are allocating a virtual register. Try to find + // a free physical register or spill an interval in order to + // assign it one (we could also spill the current interval). 
+ else { + unsigned physReg = getFreePhysReg(i->reg); + if (!physReg) { + assignStackSlotAtInterval(i); + } + else { + assignVirt2PhysReg(i->reg, physReg); + active_.push_back(&*i); + } + } + } + DEBUG(std::cerr << "finished register allocation\n"); + DEBUG(printVirt2PhysMap()); + + DEBUG(std::cerr << "Rewrite machine code:\n"); + for (MachineBasicBlockPtrs::iterator + mbbi = mbbs_.begin(), mbbe = mbbs_.end(); mbbi != mbbe; ++mbbi) { + instrAdded_ = 0; + currentMbb_ = *mbbi; + + for (currentInstr_ = currentMbb_->begin(); + currentInstr_ != currentMbb_->end(); ++currentInstr_) { + + DEBUG(std::cerr << "\tinstruction: "; + (*currentInstr_)->print(std::cerr, *tm_);); + DEBUG(std::cerr << "\t\tspilling temporarily defined operands " + "of previous instruction:\n"); + for (unsigned i = 0, e = tempDefOperands_.size(); i != e; ++i) { + spillVirtReg(tempDefOperands_[i]); + } + tempDefOperands_.clear(); + + // use our current mapping and actually replace and + // virtual register with its allocated physical registers + DEBUG(std::cerr << "\t\treplacing virtual registers with mapped " + "physical registers:\n"); + for (unsigned i = 0, e = (*currentInstr_)->getNumOperands(); + i != e; ++i) { + MachineOperand& op = (*currentInstr_)->getOperand(i); + if (op.isVirtualRegister()) { + unsigned virtReg = op.getAllocatedRegNum(); + unsigned physReg = v2pMap_[virtReg]; + // if this virtual registers lives on the stack, + // load it to a temporary physical register + if (physReg) { + DEBUG(std::cerr << "\t\t\t%reg" << virtReg + << " -> " << mri_->getName(physReg) << '\n'); + (*currentInstr_)->SetMachineOperandReg(i, physReg); + } + } + } + + DEBUG(std::cerr << "\t\tloading temporarily used operands to " + "registers:\n"); + for (unsigned i = 0, e = (*currentInstr_)->getNumOperands(); + i != e; ++i) { + MachineOperand& op = (*currentInstr_)->getOperand(i); + if (op.isVirtualRegister() && op.opIsUse()) { + unsigned virtReg = op.getAllocatedRegNum(); + unsigned physReg = v2pMap_[virtReg]; + if (!physReg) { + physReg = getFreeTempPhysReg(virtReg); + } + loadVirt2PhysReg(virtReg, physReg); + tempUseOperands_.push_back(virtReg); + (*currentInstr_)->SetMachineOperandReg(i, physReg); + } + } + + DEBUG(std::cerr << "\t\tclearing temporarily used operands:\n"); + for (unsigned i = 0, e = tempUseOperands_.size(); i != e; ++i) { + clearVirtReg(tempUseOperands_[i]); + } + tempUseOperands_.clear(); + + DEBUG(std::cerr << "\t\tassigning temporarily defined operands to " + "registers:\n"); + for (unsigned i = 0, e = (*currentInstr_)->getNumOperands(); + i != e; ++i) { + MachineOperand& op = (*currentInstr_)->getOperand(i); + if (op.isVirtualRegister() && !op.opIsUse()) { + unsigned virtReg = op.getAllocatedRegNum(); + unsigned physReg = v2pMap_[virtReg]; + if (!physReg) { + physReg = getFreeTempPhysReg(virtReg); + } + if (op.opIsDefAndUse()) { + loadVirt2PhysReg(virtReg, physReg); + } + else { + assignVirt2PhysReg(virtReg, physReg); + } + tempDefOperands_.push_back(virtReg); + (*currentInstr_)->SetMachineOperandReg(i, physReg); + } + } + + + // if the instruction is a two address instruction and the + // source operands are not identical we need to insert + // extra instructions. 
+ + unsigned opcode = (*currentInstr_)->getOpcode(); + if (tm_->getInstrInfo().isTwoAddrInstr(opcode) && + (*currentInstr_)->getOperand(0).getAllocatedRegNum() != + (*currentInstr_)->getOperand(1).getAllocatedRegNum()) { + assert((*currentInstr_)->getOperand(1).isRegister() && + (*currentInstr_)->getOperand(1).getAllocatedRegNum() && + (*currentInstr_)->getOperand(1).opIsUse() && + "Two address instruction invalid"); + + unsigned regA = + (*currentInstr_)->getOperand(0).getAllocatedRegNum(); + unsigned regB = + (*currentInstr_)->getOperand(1).getAllocatedRegNum(); + unsigned regC = + ((*currentInstr_)->getNumOperands() > 2 && + (*currentInstr_)->getOperand(2).isRegister()) ? + (*currentInstr_)->getOperand(2).getAllocatedRegNum() : + 0; + + const TargetRegisterClass* rc = mri_->getRegClass(regA); + + // special case: "a = b op a". If b is a temporary + // reserved register rewrite as: "b = b op a; a = b" + // otherwise use a temporary reserved register t and + // rewrite as: "t = b; t = t op a; a = t" + if (regC && regA == regC) { + // b is a temp reserved register + if (find(reserved_.begin(), reserved_.end(), + regB) != reserved_.end()) { + (*currentInstr_)->SetMachineOperandReg(0, regB); + ++currentInstr_; + instrAdded_ += mri_->copyRegToReg(*currentMbb_, + currentInstr_, + regA, + regB, + rc); + --currentInstr_; + } + // b is just a normal register + else { + unsigned tempReg = getFreeTempPhysReg(rc); + assert (tempReg && + "no free temp reserved physical register?"); + instrAdded_ += mri_->copyRegToReg(*currentMbb_, + currentInstr_, + tempReg, + regB, + rc); + (*currentInstr_)->SetMachineOperandReg(0, tempReg); + (*currentInstr_)->SetMachineOperandReg(1, tempReg); + ++currentInstr_; + instrAdded_ += mri_->copyRegToReg(*currentMbb_, + currentInstr_, + regA, + tempReg, + rc); + --currentInstr_; + } + } + // "a = b op c" gets rewritten to "a = b; a = a op c" + else { + instrAdded_ += mri_->copyRegToReg(*currentMbb_, + currentInstr_, + regA, + regB, + rc); + (*currentInstr_)->SetMachineOperandReg(1, regA); + } + } + } + + for (unsigned i = 0, e = p2vMap_.size(); i != e; ++i) { + assert(p2vMap_[i] != i && + "reserved physical registers at end of basic block?"); + } + } + + return true; +} + +void RA::processActiveIntervals(Intervals::const_iterator cur) +{ + DEBUG(std::cerr << "\tprocessing active intervals:\n"); + for (IntervalPtrs::iterator i = active_.begin(); i != active_.end();) { + unsigned reg = (*i)->reg; + // remove expired intervals. we expire earlier because this if + // an interval expires this is going to be the last use. 
in + // this case we can reuse the register for a def in the same + // instruction + if ((*i)->expired(cur->start() + 1)) { + DEBUG(std::cerr << "\t\tinterval " << **i << " expired\n"); + if (reg < MRegisterInfo::FirstVirtualRegister) { + clearReservedPhysReg(reg); + } + else { + p2vMap_[v2pMap_[reg]] = 0; + } + // remove interval from active + i = active_.erase(i); + } + // move not active intervals to inactive list +// else if (!(*i)->overlaps(curIndex)) { +// DEBUG(std::cerr << "\t\t\tinterval " << **i << " inactive\n"); +// unmarkReg(virtReg); +// // add interval to inactive +// inactive_.push_back(*i); +// // remove interval from active +// i = active_.erase(i); +// } + else { + ++i; + } + } +} + +void RA::processInactiveIntervals(Intervals::const_iterator cur) +{ +// DEBUG(std::cerr << "\tprocessing inactive intervals:\n"); +// for (IntervalPtrs::iterator i = inactive_.begin(); i != inactive_.end();) { +// unsigned virtReg = (*i)->reg; +// // remove expired intervals +// if ((*i)->expired(curIndex)) { +// DEBUG(std::cerr << "\t\t\tinterval " << **i << " expired\n"); +// freePhysReg(virtReg); +// // remove from inactive +// i = inactive_.erase(i); +// } +// // move re-activated intervals in active list +// else if ((*i)->overlaps(curIndex)) { +// DEBUG(std::cerr << "\t\t\tinterval " << **i << " active\n"); +// markReg(virtReg); +// // add to active +// active_.push_back(*i); +// // remove from inactive +// i = inactive_.erase(i); +// } +// else { +// ++i; +// } +// } +} + +void RA::assignStackSlotAtInterval(Intervals::const_iterator cur) +{ + DEBUG(std::cerr << "\t\tassigning stack slot at interval " + << *cur << ":\n"); + assert(!active_.empty() && + "active set cannot be empty when choosing a register to spill"); + const TargetRegisterClass* rcCur = + mf_->getSSARegMap()->getRegClass(cur->reg); + + // find the interval for a virtual register that ends last in + // active and belongs to the same register class as the current + // interval + IntervalPtrs::iterator lastEndActive = active_.begin(); + for (IntervalPtrs::iterator e = active_.end(); + lastEndActive != e; ++lastEndActive) { + if ((*lastEndActive)->reg >= MRegisterInfo::FirstVirtualRegister) { + const TargetRegisterClass* rc = + mri_->getRegClass(v2pMap_[(*lastEndActive)->reg]); + if (rcCur == rc) { + break; + } + } + } + for (IntervalPtrs::iterator i = lastEndActive, e = active_.end(); + i != e; ++i) { + if ((*i)->reg >= MRegisterInfo::FirstVirtualRegister) { + const TargetRegisterClass* rc = + mri_->getRegClass(v2pMap_[(*i)->reg]); + if (rcCur == rc && + (*lastEndActive)->end() < (*i)->end()) { + lastEndActive = i; + } + } + } + + // find the interval for a virtual register that ends last in + // inactive and belongs to the same register class as the current + // interval + IntervalPtrs::iterator lastEndInactive = inactive_.begin(); + for (IntervalPtrs::iterator e = inactive_.end(); + lastEndInactive != e; ++lastEndInactive) { + if ((*lastEndInactive)->reg >= MRegisterInfo::FirstVirtualRegister) { + const TargetRegisterClass* rc = + mri_->getRegClass(v2pMap_[(*lastEndInactive)->reg]); + if (rcCur == rc) { + break; + } + } + } + for (IntervalPtrs::iterator i = lastEndInactive, e = inactive_.end(); + i != e; ++i) { + if ((*i)->reg >= MRegisterInfo::FirstVirtualRegister) { + const TargetRegisterClass* rc = + mri_->getRegClass(v2pMap_[(*i)->reg]); + if (rcCur == rc && + (*lastEndInactive)->end() < (*i)->end()) { + lastEndInactive = i; + } + } + } + + unsigned lastEndActiveInactive = 0; + if (lastEndActive != active_.end() && + 
lastEndActiveInactive < (*lastEndActive)->end()) { + lastEndActiveInactive = (*lastEndActive)->end(); + } + if (lastEndInactive != inactive_.end() && + lastEndActiveInactive < (*lastEndInactive)->end()) { + lastEndActiveInactive = (*lastEndInactive)->end(); + } + + if (lastEndActiveInactive > cur->end()) { + if (lastEndInactive == inactive_.end() || + (*lastEndActive)->end() > (*lastEndInactive)->end()) { + assignVirt2StackSlot((*lastEndActive)->reg); + active_.erase(lastEndActive); + } + else { + assignVirt2StackSlot((*lastEndInactive)->reg); + inactive_.erase(lastEndInactive); + } + unsigned physReg = getFreePhysReg(cur->reg); + assert(physReg && "no free physical register after spill?"); + assignVirt2PhysReg(cur->reg, physReg); + active_.push_back(&*cur); + } + else { + assignVirt2StackSlot(cur->reg); + } +} + +void RA::reservePhysReg(unsigned physReg) +{ + DEBUG(std::cerr << "\t\t\treserving physical physical register: " + << mri_->getName(physReg) << '\n'); + // if this register holds a value spill it + unsigned virtReg = p2vMap_[physReg]; + if (virtReg != 0) { + assert(virtReg != physReg && "reserving an already reserved phus reg?"); + // remove interval from active + for (IntervalPtrs::iterator i = active_.begin(), e = active_.end(); + i != e; ++i) { + if ((*i)->reg == virtReg) { + active_.erase(i); + break; + } + } + spillVirtReg(virtReg); + } + p2vMap_[physReg] = physReg; // this denotes a reserved physical register +} + +void RA::clearReservedPhysReg(unsigned physReg) +{ + DEBUG(std::cerr << "\t\t\tclearing reserved physical physical register: " + << mri_->getName(physReg) << '\n'); + assert(p2vMap_[physReg] == physReg && + "attempt to clear a non reserved physical register"); + p2vMap_[physReg] = 0; +} + +bool RA::physRegAvailable(unsigned physReg) +{ + if (p2vMap_[physReg]) { + return false; + } + + // if it aliases other registers it is still not free + for (const unsigned* as = mri_->getAliasSet(physReg); *as; ++as) { + if (p2vMap_[*as]) { + return false; + } + } + + // if it is one of the reserved registers it is still not free + if (find(reserved_.begin(), reserved_.end(), physReg) != reserved_.end()) { + return false; + } + + return true; +} + +unsigned RA::getFreePhysReg(unsigned virtReg) +{ + DEBUG(std::cerr << "\t\tgetting free physical register: "); + const TargetRegisterClass* rc = mf_->getSSARegMap()->getRegClass(virtReg); + TargetRegisterClass::iterator reg = rc->allocation_order_begin(*mf_); + TargetRegisterClass::iterator regEnd = rc->allocation_order_end(*mf_); + + for (; reg != regEnd; ++reg) { + if (physRegAvailable(*reg)) { + assert(*reg != 0 && "Cannot use register!"); + DEBUG(std::cerr << mri_->getName(*reg) << '\n'); + return *reg; // Found an unused register! 
+ } + } + + DEBUG(std::cerr << "no free register\n"); + return 0; +} + +bool RA::tempPhysRegAvailable(unsigned physReg) +{ + assert(find(reserved_.begin(), reserved_.end(), physReg) != reserved_.end() + && "cannot call this method with a non reserved temp register"); + + if (p2vMap_[physReg]) { + return false; + } + + // if it aliases other registers it is still not free + for (const unsigned* as = mri_->getAliasSet(physReg); *as; ++as) { + if (p2vMap_[*as]) { + return false; + } + } + + return true; +} + +unsigned RA::getFreeTempPhysReg(const TargetRegisterClass* rc) +{ + DEBUG(std::cerr << "\t\tgetting free temporary physical register: "); + + for (Regs::const_iterator + reg = reserved_.begin(), regEnd = reserved_.end(); + reg != regEnd; ++reg) { + if (rc == mri_->getRegClass(*reg) && tempPhysRegAvailable(*reg)) { + assert(*reg != 0 && "Cannot use register!"); + DEBUG(std::cerr << mri_->getName(*reg) << '\n'); + return *reg; // Found an unused register! + } + } + assert(0 && "no free temporary physical register?"); + return 0; +} + +void RA::assignVirt2PhysReg(unsigned virtReg, unsigned physReg) +{ + assert((physRegAvailable(physReg) || + find(reserved_.begin(), + reserved_.end(), + physReg) != reserved_.end()) && + "attempt to allocate to a not available physical register"); + v2pMap_[virtReg] = physReg; + p2vMap_[physReg] = virtReg; +} + +void RA::clearVirtReg(unsigned virtReg) +{ + Virt2PhysMap::iterator it = v2pMap_.find(virtReg); + assert(it != v2pMap_.end() && + "attempting to clear a not allocated virtual register"); + unsigned physReg = it->second; + p2vMap_[physReg] = 0; + v2pMap_[virtReg] = 0; // this marks that this virtual register + // lives on the stack + DEBUG(std::cerr << "\t\t\tcleared register " << mri_->getName(physReg) + << "\n"); +} + +void RA::assignVirt2StackSlot(unsigned virtReg) +{ + const TargetRegisterClass* rc = mf_->getSSARegMap()->getRegClass(virtReg); + int frameIndex = mf_->getFrameInfo()->CreateStackObject(rc); + + bool inserted = v2ssMap_.insert(std::make_pair(virtReg, frameIndex)).second; + assert(inserted && + "attempt to assign stack slot to already assigned register?"); + // if the virtual register was previously assigned clear the mapping + // and free the virtual register + if (v2pMap_.find(virtReg) != v2pMap_.end()) { + clearVirtReg(virtReg); + } +} + +int RA::findOrCreateStackSlot(unsigned virtReg) +{ + // use lower_bound so that we can do a possibly O(1) insert later + // if necessary + Virt2StackSlotMap::iterator it = v2ssMap_.lower_bound(virtReg); + if (it != v2ssMap_.end() && it->first == virtReg) { + return it->second; + } + const TargetRegisterClass* rc = mf_->getSSARegMap()->getRegClass(virtReg); + int frameIndex = mf_->getFrameInfo()->CreateStackObject(rc); + + v2ssMap_.insert(it, std::make_pair(virtReg, frameIndex)); + + return frameIndex; +} + +void RA::spillVirtReg(unsigned virtReg) +{ + DEBUG(std::cerr << "\t\t\tspilling register: " << virtReg); + const TargetRegisterClass* rc = mf_->getSSARegMap()->getRegClass(virtReg); + int frameIndex = findOrCreateStackSlot(virtReg); + DEBUG(std::cerr << " to stack slot #" << frameIndex << '\n'); + ++numSpilled; + instrAdded_ += mri_->storeRegToStackSlot(*currentMbb_, currentInstr_, + v2pMap_[virtReg], frameIndex, rc); + clearVirtReg(virtReg); +} + +void RA::loadVirt2PhysReg(unsigned virtReg, unsigned physReg) +{ + DEBUG(std::cerr << "\t\t\tloading register: " << virtReg); + const TargetRegisterClass* rc = mf_->getSSARegMap()->getRegClass(virtReg); + int frameIndex = 
findOrCreateStackSlot(virtReg); + DEBUG(std::cerr << " from stack slot #" << frameIndex << '\n'); + ++numReloaded; + instrAdded_ += mri_->loadRegFromStackSlot(*currentMbb_, currentInstr_, + physReg, frameIndex, rc); + assignVirt2PhysReg(virtReg, physReg); +} + +FunctionPass* llvm::createLinearScanRegisterAllocator() { + return new RA(); +}
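A closing illustration of the two-address rewriting that runOnMachineFunction performs above: on a two-address target the destination must equal the first source, so "a = b op c" is rewritten to "a = b; a = a op c", and the aliasing case "a = b op a" needs a scratch register (or b itself, when b is one of the reserved temporaries) so the initial copy does not clobber the second source. The sketch below reproduces just that decision on a toy instruction type; it is illustrative only, and Instr and twoAddressRewrite are not the pass's data structures.

    #include <iostream>
    #include <string>
    #include <vector>

    // Toy three-operand instruction: dst = src1 op src2 (src2 may equal dst).
    struct Instr { std::string op, dst, src1, src2; };

    // Rewrite into two-address form, where the destination and the first source
    // must be the same register.  'tmp' stands in for a reserved scratch register.
    static std::vector<Instr> twoAddressRewrite(const Instr& I, const std::string& tmp) {
      std::vector<Instr> out;
      if (I.dst == I.src1) {                          // already in two-address form
        out.push_back(I);
      } else if (I.dst == I.src2) {                   // a = b op a: a copy would clobber a
        out.push_back({"copy", tmp,   I.src1, ""});   // t = b
        out.push_back({I.op,   tmp,   tmp,    I.src2}); // t = t op a
        out.push_back({"copy", I.dst, tmp,    ""});   // a = t
      } else {                                        // a = b op c
        out.push_back({"copy", I.dst, I.src1, ""});   // a = b
        out.push_back({I.op,   I.dst, I.dst,  I.src2}); // a = a op c
      }
      return out;
    }

    int main() {
      std::vector<Instr> cases = {{"add", "a", "b", "c"}, {"sub", "a", "b", "a"}};
      for (unsigned i = 0; i != cases.size(); ++i) {
        const Instr& I = cases[i];
        std::cout << I.dst << " = " << I.src1 << ' ' << I.op << ' ' << I.src2
                  << "  becomes:\n";
        std::vector<Instr> rewritten = twoAddressRewrite(I, "t");
        for (unsigned j = 0; j != rewritten.size(); ++j) {
          const Instr& R = rewritten[j];
          std::cout << "  " << R.op << ' ' << R.dst << ", " << R.src1;
          if (!R.src2.empty()) std::cout << ", " << R.src2;
          std::cout << '\n';
        }
      }
      return 0;
    }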