R600/SI: Remove unused SGPR spilling code

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@216218 91177308-0d34-0410-b5e6-96231b3b80d8
Tom Stellard 2014-08-21 20:40:56 +00:00
parent 9b60cb102a
commit 7af96a25fc
2 changed files with 0 additions and 80 deletions

@@ -28,69 +28,8 @@ void SIMachineFunctionInfo::anchor() {}
SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    PSInputAddr(0),
    SpillTracker(),
    NumUserSGPRs(0) { }

static unsigned createLaneVGPR(MachineRegisterInfo &MRI, MachineFunction *MF) {
  unsigned VGPR = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);

  // We need to add this register as live out for the function, in order to
  // have the live range calculated directly.
  //
  // When register spilling begins, we have already calculated the live
  // intervals for all the registers.  Since we are spilling SGPRs to
  // VGPRs, we need to update the Lane VGPR's live interval every time we
  // spill or restore a register.
  //
  // Unfortunately, there is no good way to update the live interval as
  // the TargetInstrInfo callbacks for spilling and restoring don't give
  // us access to the live interval information.
  //
  // We are lucky, though, because the InlineSpiller calls
  // LiveRangeEdit::calculateRegClassAndHint() which iterates through
  // all the new registers that have been created when restoring a register
  // and calls LiveIntervals::getInterval(), which creates and computes
  // the live interval for the newly created register.  However, once this
  // live interval is created, it doesn't change, and since we usually reuse
  // the Lane VGPR multiple times, this means any uses after the first aren't
  // added to the live interval.
  //
  // To work around this, we add Lane VGPRs to the function's live-out list,
  // so that we can guarantee their live ranges will cover all of their uses.
  for (MachineBasicBlock &MBB : *MF) {
    if (MBB.back().getOpcode() == AMDGPU::S_ENDPGM) {
      MBB.back().addOperand(*MF, MachineOperand::CreateReg(VGPR, false, true));
      return VGPR;
    }
  }

  LLVMContext &Ctx = MF->getFunction()->getContext();
  Ctx.emitError("Could not find S_ENDPGM instruction.");

  return VGPR;
}

unsigned SIMachineFunctionInfo::RegSpillTracker::reserveLanes(
    MachineRegisterInfo &MRI, MachineFunction *MF, unsigned NumRegs) {
  unsigned StartLane = CurrentLane;
  CurrentLane += NumRegs;
  if (!LaneVGPR) {
    LaneVGPR = createLaneVGPR(MRI, MF);
  } else {
    if (CurrentLane >= MAX_LANES) {
      StartLane = CurrentLane = 0;
      LaneVGPR = createLaneVGPR(MRI, MF);
    }
  }
  return StartLane;
}

void SIMachineFunctionInfo::RegSpillTracker::addSpilledReg(unsigned FrameIndex,
                                                           unsigned Reg,
                                                           int Lane) {
  SpilledRegisters[FrameIndex] = SpilledReg(Reg, Lane);
}

/// \brief Returns a register that is not used at any point in the function.
/// If all registers are used, then this function will return
/// AMDGPU::NoRegister.

@@ -36,31 +36,12 @@ public:
    bool hasLane() { return Lane != -1; }
  };

  struct RegSpillTracker {
  private:
    unsigned CurrentLane;
    std::map<unsigned, SpilledReg> SpilledRegisters;
  public:
    unsigned LaneVGPR;
    RegSpillTracker() : CurrentLane(0), SpilledRegisters(), LaneVGPR(0) { }
    /// \p NumRegs The number of consecutive registers that need to be spilled.
    ///            This function will ensure that all registers are stored in
    ///            the same VGPR.
    /// \returns The lane to be used for storing the first register.
    unsigned reserveLanes(MachineRegisterInfo &MRI, MachineFunction *MF,
                          unsigned NumRegs = 1);
    void addSpilledReg(unsigned FrameIndex, unsigned Reg, int Lane = -1);
    const SpilledReg& getSpilledReg(unsigned FrameIndex);
    bool programSpillsRegisters() { return !SpilledRegisters.empty(); }
  };

  // SIMachineFunctionInfo definition

  SIMachineFunctionInfo(const MachineFunction &MF);
  SpilledReg getSpilledReg(MachineFunction *MF, unsigned FrameIndex,
                           unsigned SubIdx);
  unsigned PSInputAddr;
  struct RegSpillTracker SpillTracker;
  unsigned NumUserSGPRs;
  std::map<unsigned, unsigned> LaneVGPRs;
};
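
For reference, the RegSpillTracker being deleted above packed spilled SGPRs into lanes of a shared Lane VGPR: reserveLanes() hands out consecutive lanes (allocating a fresh VGPR once the current one runs out of lanes), addSpilledReg() records which lane a frame index was written to, and getSpilledReg() looks that record up on restore. Below is a minimal caller sketch based only on the declarations shown in this diff; spillSGPRToLane() and restoreSGPRFromLane() are hypothetical names, not the actual in-tree spill hooks.

// Hypothetical sketch of how the removed RegSpillTracker API was meant to be
// driven; not the actual SIInstrInfo/SIRegisterInfo spill code.
#include "SIMachineFunctionInfo.h"                // assumed in-tree include
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// Spill side: reserve NumRegs consecutive lanes (kept in one Lane VGPR) and
// remember which lane the value stored in FrameIndex starts at.
static void spillSGPRToLane(MachineFunction &MF, unsigned FrameIndex,
                            unsigned SGPR, unsigned NumRegs) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned FirstLane = MFI->SpillTracker.reserveLanes(MRI, &MF, NumRegs);
  MFI->SpillTracker.addSpilledReg(FrameIndex, SGPR, FirstLane);
  // A V_WRITELANE into MFI->SpillTracker.LaneVGPR would be emitted here.
}

// Restore side: recover the lane recorded for FrameIndex, if any.
static void restoreSGPRFromLane(MachineFunction &MF, unsigned FrameIndex) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (!MFI->SpillTracker.programSpillsRegisters())
    return;

  SIMachineFunctionInfo::SpilledReg Spill =
      MFI->SpillTracker.getSpilledReg(FrameIndex);
  if (Spill.hasLane()) {
    // A V_READLANE from MFI->SpillTracker.LaneVGPR at lane Spill.Lane would
    // be emitted here.
  }
}

The declarations that survive in the header hunk, getSpilledReg(MF, FrameIndex, SubIdx) and the LaneVGPRs map, are what the backend continues to use for SGPR spilling after this commit.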