Exit with nice warnings when the register allocator runs out of registers.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@63267 91177308-0d34-0410-b5e6-96231b3b80d8
Evan Cheng 2009-01-29 02:20:59 +00:00
parent e5af2d3a22
commit 5a3c6a87b0
5 changed files with 124 additions and 41 deletions

View File

@ -377,6 +377,10 @@ namespace llvm {
const int *RHSValNoAssignments,
SmallVector<VNInfo*, 16> &NewVNInfo);
/// isInOneLiveRange - Return true if the range specified is entirely in
/// a single LiveRange of the live interval.
bool isInOneLiveRange(unsigned Start, unsigned End);
/// removeRange - Remove the specified range from this interval. Note that
/// the range must be in a single LiveRange in its entirety.
void removeRange(unsigned Start, unsigned End, bool RemoveDeadValNo = false);

View File

@ -244,6 +244,16 @@ LiveInterval::addRangeFrom(LiveRange LR, iterator From) {
return ranges.insert(it, LR);
}
/// isInOneLiveRange - Return true if the range specified is entirely in
/// a single LiveRange of the live interval.
bool LiveInterval::isInOneLiveRange(unsigned Start, unsigned End) {
Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
if (I == ranges.begin())
return false;
--I;
return I->contains(Start) && I->contains(End-1);
}
/// removeRange - Remove the specified range from this interval. Note that
/// the range must be in a single LiveRange in its entirety.
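
For readers who have not worked with LiveInterval before, the following is a rough, self-contained model of the lookup isInOneLiveRange performs. The Range struct, the lessThanStart comparator, and the sample values are hypothetical stand-ins for LLVM's LiveRange machinery, which differs in detail; only the sorted-vector search is modeled. The ranges are kept sorted by start index, upper_bound finds the first range starting strictly after Start, stepping back one gives the only candidate that could contain Start, and the query succeeds only if that single range also covers End-1.

#include <algorithm>
#include <cassert>
#include <vector>

// Hypothetical stand-in for LLVM's LiveRange: a half-open interval [start, end).
struct Range {
  unsigned start, end;
  bool contains(unsigned i) const { return start <= i && i < end; }
};

// Comparator for std::upper_bound: "is the query index before this range's start?"
static bool lessThanStart(unsigned Idx, const Range &R) { return Idx < R.start; }

// True if [Start, End) lies entirely inside one element of the sorted 'ranges'.
static bool isInOneRange(const std::vector<Range> &ranges,
                         unsigned Start, unsigned End) {
  auto I = std::upper_bound(ranges.begin(), ranges.end(), Start, lessThanStart);
  if (I == ranges.begin())
    return false;             // Start comes before every range
  --I;                        // last range whose start is <= Start
  return I->contains(Start) && I->contains(End - 1);
}

int main() {
  std::vector<Range> ranges = {{4, 12}, {20, 30}};
  assert(isInOneRange(ranges, 6, 10));    // fully inside [4, 12)
  assert(!isInOneRange(ranges, 10, 22));  // straddles the hole [12, 20)
  assert(!isInOneRange(ranges, 0, 2));    // before the first range
  return 0;
}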

View File

@ -2228,7 +2228,19 @@ void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
unsigned Index = getInstructionIndex(MI);
if (pli.liveAt(Index)) {
vrm.addEmergencySpill(SpillReg, MI);
- pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
+ unsigned StartIdx = getLoadIndex(Index);
+ unsigned EndIdx = getStoreIndex(Index)+1;
+ if (pli.isInOneLiveRange(StartIdx, EndIdx))
+ pli.removeRange(StartIdx, EndIdx);
+ else {
+ cerr << "Ran out of registers during register allocation!\n";
+ if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
+ cerr << "Please check your inline asm statement for invalid "
+ << "constraints:\n";
+ MI->print(cerr.stream(), tm_);
+ }
+ exit(1);
+ }
for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
if (!hasInterval(*AS))
continue;

View File

@ -27,6 +27,7 @@
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
@ -237,7 +238,7 @@ namespace {
/// value. This method returns the modified instruction.
///
MachineInstr *reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
- unsigned OpNum);
+ unsigned OpNum, SmallSet<unsigned, 4> &RRegs);
/// ComputeLocalLiveness - Computes liveness of registers within a basic
/// block, setting the killed/dead flags as appropriate.
@ -475,7 +476,8 @@ unsigned RALocal::getReg(MachineBasicBlock &MBB, MachineInstr *I,
/// modified instruction.
///
MachineInstr *RALocal::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
- unsigned OpNum) {
+ unsigned OpNum,
+ SmallSet<unsigned, 4> &ReloadedRegs) {
unsigned VirtReg = MI->getOperand(OpNum).getReg();
// If the virtual register is already available, just update the instruction
@ -513,6 +515,29 @@ MachineInstr *RALocal::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
MF->getRegInfo().setPhysRegUsed(PhysReg);
MI->getOperand(OpNum).setReg(PhysReg); // Assign the input register
getVirtRegLastUse(VirtReg) = std::make_pair(MI, OpNum);
if (!ReloadedRegs.insert(PhysReg)) {
cerr << "Ran out of registers during register allocation!\n";
if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
cerr << "Please check your inline asm statement for invalid "
<< "constraints:\n";
MI->print(cerr.stream(), TM);
}
exit(1);
}
for (const unsigned *SubRegs = TRI->getSubRegisters(PhysReg);
*SubRegs; ++SubRegs) {
if (!ReloadedRegs.insert(*SubRegs)) {
cerr << "Ran out of registers during register allocation!\n";
if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
cerr << "Please check your inline asm statement for invalid "
<< "constraints:\n";
MI->print(cerr.stream(), TM);
}
exit(1);
}
}
return MI;
}
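
To make the ReloadedRegs bookkeeping above concrete, here is a minimal standalone sketch of the idea, using std::set in place of LLVM's SmallSet and invented register numbers; noteReloaded and its sub-register list are illustrative, not RALocal API. The set is reset for each instruction, every physical register chosen for a reload is recorded along with its sub-registers, and a second attempt to record the same register means the allocator has started recycling registers within a single instruction, i.e. it has run out, and the only sensible action is to print a diagnostic and give up.

#include <cstdio>
#include <cstdlib>
#include <set>
#include <vector>

using PhysReg = unsigned;

// Record Reg and its (hypothetical) sub-registers as reloaded for the current
// instruction. Returns false if any of them was already recorded, which means
// a register is being reused for a second operand of the same instruction.
static bool noteReloaded(std::set<PhysReg> &ReloadedRegs, PhysReg Reg,
                         const std::vector<PhysReg> &SubRegs) {
  if (!ReloadedRegs.insert(Reg).second)
    return false;
  for (PhysReg Sub : SubRegs)
    if (!ReloadedRegs.insert(Sub).second)
      return false;
  return true;
}

int main() {
  std::set<PhysReg> ReloadedRegs;  // cleared for every instruction
  // First operand reloads into register 1 (sub-registers 10 and 11): fine.
  if (!noteReloaded(ReloadedRegs, 1, {10, 11})) {
    std::fprintf(stderr, "Ran out of registers during register allocation!\n");
    return EXIT_FAILURE;
  }
  // A later operand of the same instruction is forced into register 1 again:
  // report the condition and bail out, mirroring the exit(1) path above.
  if (!noteReloaded(ReloadedRegs, 1, {10, 11})) {
    std::fprintf(stderr, "Ran out of registers during register allocation!\n");
    return EXIT_FAILURE;
  }
  return 0;
}

In the actual patch the SmallSet is declared per instruction in AllocateBasicBlock (see the hunk below), so the check only fires when a single instruction's operands cannot all be given distinct registers.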
@ -581,17 +606,16 @@ void RALocal::ComputeLocalLiveness(MachineBasicBlock& MBB) {
if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) continue;
- const unsigned* subregs = TRI->getAliasSet(MO.getReg());
- if (subregs) {
- while (*subregs) {
+ const unsigned* Aliases = TRI->getAliasSet(MO.getReg());
+ if (Aliases) {
+ while (*Aliases) {
DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
- alias = LastUseDef.find(*subregs);
+ alias = LastUseDef.find(*Aliases);
- if (alias != LastUseDef.end() &&
- alias->second.first != I)
- LastUseDef[*subregs] = std::make_pair(I, i);
+ if (alias != LastUseDef.end() && alias->second.first != I)
+ LastUseDef[*Aliases] = std::make_pair(I, i);
- ++subregs;
+ ++Aliases;
}
}
}
@ -695,12 +719,12 @@ void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) {
MF->getRegInfo().setPhysRegUsed(Reg);
PhysRegsUsed[Reg] = 0; // It is free and reserved now
AddToPhysRegsUseOrder(Reg);
- for (const unsigned *AliasSet = TRI->getSubRegisters(Reg);
- *AliasSet; ++AliasSet) {
- if (PhysRegsUsed[*AliasSet] != -2) {
- AddToPhysRegsUseOrder(*AliasSet);
- PhysRegsUsed[*AliasSet] = 0; // It is free and reserved now
- MF->getRegInfo().setPhysRegUsed(*AliasSet);
+ for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ *SubRegs; ++SubRegs) {
+ if (PhysRegsUsed[*SubRegs] != -2) {
+ AddToPhysRegsUseOrder(*SubRegs);
+ PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
+ MF->getRegInfo().setPhysRegUsed(*SubRegs);
}
}
}
@ -778,12 +802,12 @@ void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) {
PhysRegsUsed[Reg] = 0; // It is free and reserved now
AddToPhysRegsUseOrder(Reg);
- for (const unsigned *AliasSet = TRI->getSubRegisters(Reg);
- *AliasSet; ++AliasSet) {
- if (PhysRegsUsed[*AliasSet] != -2) {
- MF->getRegInfo().setPhysRegUsed(*AliasSet);
- PhysRegsUsed[*AliasSet] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(*AliasSet);
+ for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ *SubRegs; ++SubRegs) {
+ if (PhysRegsUsed[*SubRegs] != -2) {
+ MF->getRegInfo().setPhysRegUsed(*SubRegs);
+ PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
+ AddToPhysRegsUseOrder(*SubRegs);
}
}
}
@ -797,12 +821,13 @@ void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) {
// physical register is referenced by the instruction, that it is guaranteed
// to be live-in, or the input is badly hosed.
//
SmallSet<unsigned, 4> ReloadedRegs;
for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
MachineOperand& MO = MI->getOperand(i);
// here we are looking for only used operands (never def&use)
if (MO.isReg() && !MO.isDef() && MO.getReg() && !MO.isImplicit() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg()))
- MI = reloadVirtReg(MBB, MI, i);
+ MI = reloadVirtReg(MBB, MI, i, ReloadedRegs);
}
// If this instruction is the last user of this register, kill the
@ -830,13 +855,13 @@ void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) {
DOUT << " Last use of " << TRI->getName(PhysReg)
<< "[%reg" << VirtReg <<"], removing it from live set\n";
removePhysReg(PhysReg);
- for (const unsigned *AliasSet = TRI->getSubRegisters(PhysReg);
- *AliasSet; ++AliasSet) {
- if (PhysRegsUsed[*AliasSet] != -2) {
+ for (const unsigned *SubRegs = TRI->getSubRegisters(PhysReg);
+ *SubRegs; ++SubRegs) {
+ if (PhysRegsUsed[*SubRegs] != -2) {
DOUT << " Last use of "
- << TRI->getName(*AliasSet)
+ << TRI->getName(*SubRegs)
<< "[%reg" << VirtReg <<"], removing it from live set\n";
- removePhysReg(*AliasSet);
+ removePhysReg(*SubRegs);
}
}
}
@ -861,12 +886,12 @@ void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) {
PhysRegsUsed[Reg] = 0; // It is free and reserved now
AddToPhysRegsUseOrder(Reg);
- for (const unsigned *AliasSet = TRI->getSubRegisters(Reg);
- *AliasSet; ++AliasSet) {
- if (PhysRegsUsed[*AliasSet] != -2) {
- MF->getRegInfo().setPhysRegUsed(*AliasSet);
- PhysRegsUsed[*AliasSet] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(*AliasSet);
+ for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ *SubRegs; ++SubRegs) {
+ if (PhysRegsUsed[*SubRegs] != -2) {
+ MF->getRegInfo().setPhysRegUsed(*SubRegs);
+ PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
+ AddToPhysRegsUseOrder(*SubRegs);
}
}
}
@ -883,12 +908,12 @@ void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) {
PhysRegsUsed[Reg] = 0; // It is free and reserved now
}
MF->getRegInfo().setPhysRegUsed(Reg);
- for (const unsigned *AliasSet = TRI->getSubRegisters(Reg);
- *AliasSet; ++AliasSet) {
- if (PhysRegsUsed[*AliasSet] != -2) {
- AddToPhysRegsUseOrder(*AliasSet);
- PhysRegsUsed[*AliasSet] = 0; // It is free and reserved now
- MF->getRegInfo().setPhysRegUsed(*AliasSet);
+ for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ *SubRegs; ++SubRegs) {
+ if (PhysRegsUsed[*SubRegs] != -2) {
+ AddToPhysRegsUseOrder(*SubRegs);
+ PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
+ MF->getRegInfo().setPhysRegUsed(*SubRegs);
}
}
}

View File

@ -0,0 +1,32 @@
; RUN: llvm-as < %s | llc -mtriple=i386-apple-darwin -disable-fp-elim
; XFAIL: *
; Expected to run out of registers during allocation.
; rdar://6251720
%struct.CABACContext = type { i32, i32, i8* }
%struct.H264Context = type { %struct.CABACContext, [460 x i8] }
@coeff_abs_level_m1_offset = common global [6 x i32] zeroinitializer ; <[6 x i32]*> [#uses=1]
@coeff_abs_level1_ctx = common global [8 x i8] zeroinitializer ; <[8 x i8]*> [#uses=1]
define i32 @decode_cabac_residual(%struct.H264Context* %h, i32 %cat) nounwind {
entry:
%0 = getelementptr [6 x i32]* @coeff_abs_level_m1_offset, i32 0, i32 %cat ; <i32*> [#uses=1]
%1 = load i32* %0, align 4 ; <i32> [#uses=1]
%2 = load i8* getelementptr ([8 x i8]* @coeff_abs_level1_ctx, i32 0, i32 0), align 1 ; <i8> [#uses=1]
%3 = zext i8 %2 to i32 ; <i32> [#uses=1]
%.sum = add i32 %3, %1 ; <i32> [#uses=1]
%4 = getelementptr %struct.H264Context* %h, i32 0, i32 1, i32 %.sum ; <i8*> [#uses=2]
%5 = getelementptr %struct.H264Context* %h, i32 0, i32 0, i32 0 ; <i32*> [#uses=2]
%6 = getelementptr %struct.H264Context* %h, i32 0, i32 0, i32 1 ; <i32*> [#uses=2]
%7 = getelementptr %struct.H264Context* %h, i32 0, i32 0, i32 2 ; <i8**> [#uses=2]
%8 = load i32* %5, align 4 ; <i32> [#uses=1]
%9 = load i32* %6, align 4 ; <i32> [#uses=1]
%10 = load i8* %4, align 4 ; <i8> [#uses=1]
%asmtmp = tail call { i32, i32, i32, i32 } asm sideeffect "#$0 $1 $2 $3 $4 $5", "=&{di},=r,=r,=*m,=&q,=*imr,1,2,*m,5,~{dirflag},~{fpsr},~{flags},~{cx}"(i8** %7, i8* %4, i32 %8, i32 %9, i8** %7, i8 %10) nounwind ; <{ i32, i32, i32, i32 }> [#uses=3]
%asmresult = extractvalue { i32, i32, i32, i32 } %asmtmp, 0 ; <i32> [#uses=1]
%asmresult1 = extractvalue { i32, i32, i32, i32 } %asmtmp, 1 ; <i32> [#uses=1]
store i32 %asmresult1, i32* %5
%asmresult2 = extractvalue { i32, i32, i32, i32 } %asmtmp, 2 ; <i32> [#uses=1]
store i32 %asmresult2, i32* %6
ret i32 %asmresult
}
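
This new test (XFAILed because failing is the expected outcome) exercises the path added above: with frame-pointer elimination disabled, the inline asm's output, scratch, and clobber constraints ask for more distinct i386 registers than are available, so llc should now print "Ran out of registers during register allocation!" along with the offending asm statement and exit with a non-zero status.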