Update CodeGen for MRegisterInfo --> TargetInstrInfo changes.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@45673 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Owen Anderson 2008-01-07 01:35:56 +00:00
parent 43dbe05279
commit 6425f8be72
5 changed files with 14 additions and 12 deletions

View File

@@ -709,8 +709,8 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
 FoldOps.push_back(OpIdx);
 }
-MachineInstr *fmi = isSS ? mri_->foldMemoryOperand(MI, FoldOps, Slot)
-                         : mri_->foldMemoryOperand(MI, FoldOps, DefMI);
+MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
+                         : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
 if (fmi) {
 // Attempt to fold the memory reference into the instruction. If
 // we can do this, we don't need to insert spill code.
@@ -746,7 +746,7 @@ bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
 FoldOps.push_back(OpIdx);
 }
-return mri_->canFoldMemoryOperand(MI, FoldOps);
+return tii_->canFoldMemoryOperand(MI, FoldOps);
 }
 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {

View File

@@ -505,6 +505,7 @@ unsigned RABigBlock::chooseReg(MachineBasicBlock &MBB, MachineInstr *I,
 MachineInstr *RABigBlock::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
 unsigned OpNum) {
 unsigned VirtReg = MI->getOperand(OpNum).getReg();
+const TargetInstrInfo* TII = MBB.getParent()->getTarget().getInstrInfo();
 // If the virtual register is already available in a physical register,
 // just update the instruction and return.
@@ -525,7 +526,7 @@ MachineInstr *RABigBlock::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI
 // try to fold the spill into the instruction
 SmallVector<unsigned, 2> Ops;
 Ops.push_back(OpNum);
-if(MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, Ops, FrameIndex)) {
+if(MachineInstr* FMI = TII->foldMemoryOperand(MI, Ops, FrameIndex)) {
 ++NumFolded;
 // Since we changed the address of MI, make sure to update live variables
 // to know that the new instruction has the properties of the old one.
@@ -545,7 +546,6 @@ MachineInstr *RABigBlock::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI
 << RegInfo->getName(PhysReg) << "\n";
 // Add move instruction(s)
-const TargetInstrInfo* TII = MBB.getParent()->getTarget().getInstrInfo();
 TII->loadRegFromStackSlot(MBB, MI, PhysReg, FrameIndex, RC);
 ++NumLoads; // Update statistics

View File

@@ -50,6 +50,7 @@ namespace {
 const TargetMachine *TM;
 MachineFunction *MF;
 const MRegisterInfo *MRI;
+const TargetInstrInfo *TII;
 LiveVariables *LV;
 // StackSlotForVirtReg - Maps virtual regs to the frame index where these
@@ -478,7 +479,7 @@ MachineInstr *RALocal::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
 // If we can fold this spill into this instruction, do so now.
 SmallVector<unsigned, 2> Ops;
 Ops.push_back(OpNum);
-if (MachineInstr* FMI = MRI->foldMemoryOperand(MI, Ops, FrameIndex)) {
+if (MachineInstr* FMI = TII->foldMemoryOperand(MI, Ops, FrameIndex)) {
 ++NumFolded;
 // Since we changed the address of MI, make sure to update live variables
 // to know that the new instruction has the properties of the old one.
@@ -801,6 +802,7 @@ bool RALocal::runOnMachineFunction(MachineFunction &Fn) {
 MF = &Fn;
 TM = &Fn.getTarget();
 MRI = TM->getRegisterInfo();
+TII = TM->getInstrInfo();
 LV = &getAnalysis<LiveVariables>();
 PhysRegsUsed.assign(MRI->getNumRegs(), -1);

View File

@@ -413,7 +413,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
 if (TryUnfold) {
 SmallVector<SDNode*, 4> NewNodes;
-if (!MRI->unfoldMemoryOperand(DAG, N, NewNodes))
+if (!TII->unfoldMemoryOperand(DAG, N, NewNodes))
 return NULL;
 DOUT << "Unfolding SU # " << SU->NodeNum << "\n";

View File

@@ -793,7 +793,7 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
 DeadStore->findRegisterUseOperandIdx(PhysReg, true) == -1)
 continue;
 UnfoldPR = PhysReg;
-UnfoldedOpc = MRI->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
+UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
 false, true);
 }
 }
@@ -831,7 +831,7 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
 // unfolded. This allows us to perform the store unfolding
 // optimization.
 SmallVector<MachineInstr*, 4> NewMIs;
-if (MRI->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
+if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
 assert(NewMIs.size() == 1);
 MachineInstr *NewMI = NewMIs.back();
 NewMIs.clear();
@@ -839,7 +839,7 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
 assert(Idx != -1);
 SmallVector<unsigned, 2> Ops;
 Ops.push_back(Idx);
-MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Ops, SS);
+MachineInstr *FoldedMI = TII->foldMemoryOperand(NewMI, Ops, SS);
 if (FoldedMI) {
 if (!VRM.hasPhys(UnfoldVR))
 VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
@@ -1294,7 +1294,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
 SmallVector<MachineInstr*, 4> NewMIs;
 if (PhysReg &&
-    MRI->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
+    TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
 MBB.insert(MII, NewMIs[0]);
 VRM.RemoveMachineInstrFromMaps(&MI);
 MBB.erase(&MI);
@@ -1321,7 +1321,7 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
 if (PhysReg &&
 !TII->isStoreToStackSlot(&MI, SS) && // Not profitable!
 DeadStore->findRegisterUseOperandIdx(PhysReg, true) != -1 &&
-    MRI->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
+    TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
 MBB.insert(MII, NewMIs[0]);
 NewStore = NewMIs[1];
 MBB.insert(MII, NewStore);