Move even more functionality from MRegisterInfo into TargetInstrInfo.

Some day I'll get it all moved over...


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@45672 91177308-0d34-0410-b5e6-96231b3b80d8
Owen Anderson 2008-01-07 01:35:02 +00:00
parent 93f96d00bf
commit 43dbe05279
30 changed files with 1599 additions and 1630 deletions
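The shape of the change is the same in every target: hooks such as foldMemoryOperand, canFoldMemoryOperand, unfoldMemoryOperand and getOpcodeAfterMemoryUnfold move from the *RegisterInfo classes into the *InstrInfo classes, so clients now reach them through the instruction info rather than the register info. A minimal sketch of a call site before and after, assuming a MachineFunction MF and the MI/Ops/FI arguments from the hook's signature (the surrounding names are illustrative, not from this diff):

    // Before: the fold hook lived on the register-info object.
    //   MF.getTarget().getRegisterInfo()->foldMemoryOperand(MI, Ops, FI);
    // After this commit it is a TargetInstrInfo virtual:
    MachineInstr *FoldedMI =
        MF.getTarget().getInstrInfo()->foldMemoryOperand(MI, Ops, FI);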

include/llvm/Target/MRegisterInfo.h

@@ -484,58 +484,6 @@ public:
unsigned DestReg,
const MachineInstr *Orig) const = 0;
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
return 0;
}
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
virtual
bool canFoldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops) const{
return false;
}
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
/// possible, returns true as well as the new instructions by reference.
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr*> &NewMIs) const{
return false;
}
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const {
return false;
}
/// getOpcodeAfterMemoryUnfold - Returns the opcode of the would-be new
/// instruction after load / store are unfolded from an instruction of the
/// specified opcode. It returns zero if the specified unfolding is not
/// possible.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore) const {
return 0;
}
/// targetHandlesStackFrameRounding - Returns true if the target is
/// responsible for rounding up the stack frame (probably at emitPrologue
/// time).

include/llvm/Target/TargetInstrInfo.h

@@ -27,6 +27,8 @@ class TargetMachine;
class TargetRegisterClass;
class LiveVariables;
class CalleeSavedInfo;
class SDNode;
class SelectionDAG;
template<class T> class SmallVectorImpl;
@@ -540,6 +542,58 @@ public:
return false;
}
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
return 0;
}
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
virtual
bool canFoldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops) const{
return false;
}
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
/// possible, returns true as well as the new instructions by reference.
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr*> &NewMIs) const{
return false;
}
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const {
return false;
}
/// getOpcodeAfterMemoryUnfold - Returns the opcode of the would-be new
/// instruction after load / store are unfolded from an instruction of the
/// specified opcode. It returns zero if the specified unfolding is not
/// possible.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore) const {
return 0;
}
/// BlockHasNoFallThrough - Return true if the specified block does not
/// fall-through into its successor block. This is primarily used when a
/// branch is unanalyzable. It is useful for things like unconditional
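The fold hooks above only build the replacement instruction; per the foldMemoryOperand comment, the client removes the old instruction and inserts the new one. A hedged sketch of the driving pattern, assuming TII, MBB, an iterator MI, OpNum and FrameIndex are in scope (not code from this diff):

    SmallVector<unsigned, 2> Ops;
    Ops.push_back(OpNum);                    // operand index to fold
    if (TII->canFoldMemoryOperand(MI, Ops))
      if (MachineInstr *NewMI = TII->foldMemoryOperand(MI, Ops, FrameIndex)) {
        MBB.insert(MI, NewMI);               // add the folded load/store
        MBB.erase(MI);                       // retire the original copy
      }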

lib/Target/ARM/ARMInstrInfo.cpp

@@ -643,6 +643,119 @@ bool ARMInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
return true;
}
MachineInstr *ARMInstrInfo::foldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops,
int FI) const {
if (Ops.size() != 1) return NULL;
unsigned OpNum = Ops[0];
unsigned Opc = MI->getOpcode();
MachineInstr *NewMI = NULL;
switch (Opc) {
default: break;
case ARM::MOVr: {
if (MI->getOperand(4).getReg() == ARM::CPSR)
// If it is updating CPSR, then it cannot be folded.
break;
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(get(ARM::STR)).addReg(SrcReg).addFrameIndex(FI)
.addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
NewMI = BuildMI(get(ARM::LDR), DstReg).addFrameIndex(FI).addReg(0)
.addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
case ARM::tMOVr: {
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
// tSpill cannot take a high register operand.
break;
NewMI = BuildMI(get(ARM::tSpill)).addReg(SrcReg).addFrameIndex(FI)
.addImm(0);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
// tRestore cannot target a high register operand.
break;
NewMI = BuildMI(get(ARM::tRestore), DstReg).addFrameIndex(FI)
.addImm(0);
}
break;
}
case ARM::FCPYS: {
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(get(ARM::FSTS)).addReg(SrcReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
NewMI = BuildMI(get(ARM::FLDS), DstReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
case ARM::FCPYD: {
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(get(ARM::FSTD)).addReg(SrcReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
NewMI = BuildMI(get(ARM::FLDD), DstReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
bool ARMInstrInfo::canFoldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops) const {
if (Ops.size() != 1) return false;
unsigned OpNum = Ops[0];
unsigned Opc = MI->getOpcode();
switch (Opc) {
default: break;
case ARM::MOVr:
// If it is updating CPSR, then it cannot be folded.
return MI->getOperand(4).getReg() != ARM::CPSR;
case ARM::tMOVr: {
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
// tSpill cannot take a high register operand.
return false;
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
// tRestore cannot target a high register operand.
return false;
}
return true;
}
case ARM::FCPYS:
case ARM::FCPYD:
return true;
}
return false;
}
bool ARMInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;
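For reference, the fixed operand indices the switch above relies on, reconstructed from the accesses in this function (an inference, not a quote from the ARM instruction definitions):

    // ARM::MOVr  : 0 = dst, 1 = src, 2 = pred imm, 3 = pred reg, 4 = cc-out
    //              (operand 4 is CPSR when the move also updates the flags,
    //               which is why such moves are rejected for folding)
    // ARM::FCPYS,
    // ARM::FCPYD : 0 = dst, 1 = src, 2 = pred imm, 3 = pred reg
    // ARM::tMOVr : 0 = dst, 1 = src  (Thumb; tSpill/tRestore take low regs only)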

lib/Target/ARM/ARMInstrInfo.h

@@ -190,6 +190,20 @@ public:
virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
virtual bool canFoldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops) const;
virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
virtual bool ReverseBranchCondition(std::vector<MachineOperand> &Cond) const;

lib/Target/ARM/ARMRegisterInfo.cpp

@@ -136,7 +136,7 @@ void ARMRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
/// isLowRegister - Returns true if the register is low register r0-r7.
///
static bool isLowRegister(unsigned Reg) {
bool ARMRegisterInfo::isLowRegister(unsigned Reg) const {
using namespace ARM;
switch (Reg) {
case R0: case R1: case R2: case R3:
@@ -147,119 +147,6 @@ static bool isLowRegister(unsigned Reg) {
}
}
MachineInstr *ARMRegisterInfo::foldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops,
int FI) const {
if (Ops.size() != 1) return NULL;
unsigned OpNum = Ops[0];
unsigned Opc = MI->getOpcode();
MachineInstr *NewMI = NULL;
switch (Opc) {
default: break;
case ARM::MOVr: {
if (MI->getOperand(4).getReg() == ARM::CPSR)
// If it is updating CPSR, then it cannot be folded.
break;
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(TII.get(ARM::STR)).addReg(SrcReg).addFrameIndex(FI)
.addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
NewMI = BuildMI(TII.get(ARM::LDR), DstReg).addFrameIndex(FI).addReg(0)
.addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
case ARM::tMOVr: {
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
if (isPhysicalRegister(SrcReg) && !isLowRegister(SrcReg))
// tSpill cannot take a high register operand.
break;
NewMI = BuildMI(TII.get(ARM::tSpill)).addReg(SrcReg).addFrameIndex(FI)
.addImm(0);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
if (isPhysicalRegister(DstReg) && !isLowRegister(DstReg))
// tRestore cannot target a high register operand.
break;
NewMI = BuildMI(TII.get(ARM::tRestore), DstReg).addFrameIndex(FI)
.addImm(0);
}
break;
}
case ARM::FCPYS: {
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(TII.get(ARM::FSTS)).addReg(SrcReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
NewMI = BuildMI(TII.get(ARM::FLDS), DstReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
case ARM::FCPYD: {
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(TII.get(ARM::FSTD)).addReg(SrcReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
NewMI = BuildMI(TII.get(ARM::FLDD), DstReg).addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
bool ARMRegisterInfo::canFoldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops) const {
if (Ops.size() != 1) return false;
unsigned OpNum = Ops[0];
unsigned Opc = MI->getOpcode();
switch (Opc) {
default: break;
case ARM::MOVr:
// If it is updating CPSR, then it cannot be folded.
return MI->getOperand(4).getReg() != ARM::CPSR;
case ARM::tMOVr: {
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
if (isPhysicalRegister(SrcReg) && !isLowRegister(SrcReg))
// tSpill cannot take a high register operand.
return false;
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
if (isPhysicalRegister(DstReg) && !isLowRegister(DstReg))
// tRestore cannot target a high register operand.
return false;
}
return true;
}
case ARM::FCPYS:
case ARM::FCPYD:
return true;
}
return false;
}
const unsigned*
ARMRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
static const unsigned CalleeSavedRegs[] = {
@@ -426,12 +313,13 @@ static unsigned calcNumMI(int Opc, int ExtraOpc, unsigned Bytes,
/// constpool entry.
static
void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
unsigned DestReg, unsigned BaseReg,
int NumBytes, bool CanChangeCC,
const TargetInstrInfo &TII) {
bool isHigh = !isLowRegister(DestReg) ||
(BaseReg != 0 && !isLowRegister(BaseReg));
MachineBasicBlock::iterator &MBBI,
unsigned DestReg, unsigned BaseReg,
int NumBytes, bool CanChangeCC,
const TargetInstrInfo &TII,
const ARMRegisterInfo& MRI) {
bool isHigh = !MRI.isLowRegister(DestReg) ||
(BaseReg != 0 && !MRI.isLowRegister(BaseReg));
bool isSub = false;
// Subtract doesn't have a high register version. Load the negative value
// if either base or dest register is a high register. Also, if do not
@@ -476,7 +364,8 @@ static
void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
unsigned DestReg, unsigned BaseReg,
int NumBytes, const TargetInstrInfo &TII) {
int NumBytes, const TargetInstrInfo &TII,
const ARMRegisterInfo& MRI) {
bool isSub = NumBytes < 0;
unsigned Bytes = (unsigned)NumBytes;
if (isSub) Bytes = -NumBytes;
@@ -522,12 +411,12 @@ void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
if (NumMIs > Threshold) {
// This will expand into too many instructions. Load the immediate from a
// constpool entry.
emitThumbRegPlusImmInReg(MBB, MBBI, DestReg, BaseReg, NumBytes, true, TII);
emitThumbRegPlusImmInReg(MBB, MBBI, DestReg, BaseReg, NumBytes, true, TII, MRI);
return;
}
if (DstNotEqBase) {
if (isLowRegister(DestReg) && isLowRegister(BaseReg)) {
if (MRI.isLowRegister(DestReg) && MRI.isLowRegister(BaseReg)) {
// If both are low registers, emit DestReg = add BaseReg, max(Imm, 7)
unsigned Chunk = (1 << 3) - 1;
unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
@@ -577,9 +466,10 @@ void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
int NumBytes, ARMCC::CondCodes Pred, unsigned PredReg,
bool isThumb, const TargetInstrInfo &TII) {
bool isThumb, const TargetInstrInfo &TII,
const ARMRegisterInfo& MRI) {
if (isThumb)
emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII);
emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII, MRI);
else
emitARMRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes,
Pred, PredReg, TII);
@@ -610,12 +500,12 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
// Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
unsigned PredReg = isThumb ? 0 : Old->getOperand(2).getReg();
emitSPUpdate(MBB, I, -Amount, Pred, PredReg, isThumb, TII);
emitSPUpdate(MBB, I, -Amount, Pred, PredReg, isThumb, TII, *this);
} else {
// Note: PredReg is operand 3 for ADJCALLSTACKUP.
unsigned PredReg = isThumb ? 0 : Old->getOperand(3).getReg();
assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
emitSPUpdate(MBB, I, Amount, Pred, PredReg, isThumb, TII);
emitSPUpdate(MBB, I, Amount, Pred, PredReg, isThumb, TII, *this);
}
}
}
@@ -627,7 +517,8 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
static void emitThumbConstant(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
unsigned DestReg, int Imm,
const TargetInstrInfo &TII) {
const TargetInstrInfo &TII,
const ARMRegisterInfo& MRI) {
bool isSub = Imm < 0;
if (isSub) Imm = -Imm;
@@ -636,7 +527,7 @@ static void emitThumbConstant(MachineBasicBlock &MBB,
Imm -= ThisVal;
BuildMI(MBB, MBBI, TII.get(ARM::tMOVi8), DestReg).addImm(ThisVal);
if (Imm > 0)
emitThumbRegPlusImmediate(MBB, MBBI, DestReg, DestReg, Imm, TII);
emitThumbRegPlusImmediate(MBB, MBBI, DestReg, DestReg, Imm, TII, MRI);
if (isSub)
BuildMI(MBB, MBBI, TII.get(ARM::tNEG), DestReg)
.addReg(DestReg, false, false, true);
@@ -770,7 +661,7 @@ void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// MI would expand into a large number of instructions. Don't try to
// simplify the immediate.
if (NumMIs > 2) {
emitThumbRegPlusImmediate(MBB, II, DestReg, FrameReg, Offset, TII);
emitThumbRegPlusImmediate(MBB, II, DestReg, FrameReg, Offset, TII, *this);
MBB.erase(II);
return;
}
@@ -783,12 +674,12 @@ void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(i+1).ChangeToImmediate(Mask);
Offset = (Offset - Mask * Scale);
MachineBasicBlock::iterator NII = next(II);
emitThumbRegPlusImmediate(MBB, NII, DestReg, DestReg, Offset, TII);
emitThumbRegPlusImmediate(MBB, NII, DestReg, DestReg, Offset, TII, *this);
} else {
// Translate r0 = add sp, -imm to
// r0 = -imm (this is then translated into a series of instructions)
// r0 = add r0, sp
emitThumbConstant(MBB, II, DestReg, Offset, TII);
emitThumbConstant(MBB, II, DestReg, Offset, TII, *this);
MI.setInstrDescriptor(TII.get(ARM::tADDhirr));
MI.getOperand(i).ChangeToRegister(DestReg, false, false, true);
MI.getOperand(i+1).ChangeToRegister(FrameReg, false);
@@ -891,13 +782,14 @@ void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
bool UseRR = false;
if (Opcode == ARM::tRestore) {
if (FrameReg == ARM::SP)
emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,Offset,false,TII);
emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,
Offset, false, TII, *this);
else {
emitLoadConstPool(MBB, II, TmpReg, Offset, ARMCC::AL, 0, TII, true);
UseRR = true;
}
} else
emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII);
emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII, *this);
MI.setInstrDescriptor(TII.get(ARM::tLDR));
MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
if (UseRR)
@@ -927,13 +819,14 @@ void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
.addReg(ARM::R3, false, false, true);
if (Opcode == ARM::tSpill) {
if (FrameReg == ARM::SP)
emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,Offset,false,TII);
emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,
Offset, false, TII, *this);
else {
emitLoadConstPool(MBB, II, TmpReg, Offset, ARMCC::AL, 0, TII, true);
UseRR = true;
}
} else
emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII);
emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII, *this);
MI.setInstrDescriptor(TII.get(ARM::tSTR));
MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
if (UseRR) // Use [reg, reg] addrmode.
@@ -1266,11 +1159,11 @@ void ARMRegisterInfo::emitPrologue(MachineFunction &MF) const {
int FramePtrSpillFI = 0;
if (VARegSaveSize)
emitSPUpdate(MBB, MBBI, -VARegSaveSize, ARMCC::AL, 0, isThumb, TII);
emitSPUpdate(MBB, MBBI, -VARegSaveSize, ARMCC::AL, 0, isThumb, TII, *this);
if (!AFI->hasStackFrame()) {
if (NumBytes != 0)
emitSPUpdate(MBB, MBBI, -NumBytes, ARMCC::AL, 0, isThumb, TII);
emitSPUpdate(MBB, MBBI, -NumBytes, ARMCC::AL, 0, isThumb, TII, *this);
return;
}
@@ -1310,7 +1203,7 @@ void ARMRegisterInfo::emitPrologue(MachineFunction &MF) const {
if (!isThumb) {
// Build the new SUBri to adjust SP for integer callee-save spill area 1.
emitSPUpdate(MBB, MBBI, -GPRCS1Size, ARMCC::AL, 0, isThumb, TII);
emitSPUpdate(MBB, MBBI, -GPRCS1Size, ARMCC::AL, 0, isThumb, TII, *this);
movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, 1, STI);
} else if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH)
++MBBI;
@@ -1326,11 +1219,11 @@ void ARMRegisterInfo::emitPrologue(MachineFunction &MF) const {
if (!isThumb) {
// Build the new SUBri to adjust SP for integer callee-save spill area 2.
emitSPUpdate(MBB, MBBI, -GPRCS2Size, ARMCC::AL, 0, false, TII);
emitSPUpdate(MBB, MBBI, -GPRCS2Size, ARMCC::AL, 0, false, TII, *this);
// Build the new SUBri to adjust SP for FP callee-save spill area.
movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, 2, STI);
emitSPUpdate(MBB, MBBI, -DPRCSSize, ARMCC::AL, 0, false, TII);
emitSPUpdate(MBB, MBBI, -DPRCSSize, ARMCC::AL, 0, false, TII, *this);
}
// Determine starting offsets of spill areas.
@@ -1347,7 +1240,7 @@ void ARMRegisterInfo::emitPrologue(MachineFunction &MF) const {
// Insert it after all the callee-save spills.
if (!isThumb)
movePastCSLoadStoreOps(MBB, MBBI, ARM::FSTD, 3, STI);
emitSPUpdate(MBB, MBBI, -NumBytes, ARMCC::AL, 0, isThumb, TII);
emitSPUpdate(MBB, MBBI, -NumBytes, ARMCC::AL, 0, isThumb, TII, *this);
}
if(STI.isTargetELF() && hasFP(MF)) {
@@ -1390,7 +1283,7 @@ void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
int NumBytes = (int)MFI->getStackSize();
if (!AFI->hasStackFrame()) {
if (NumBytes != 0)
emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, isThumb, TII);
emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, isThumb, TII, *this);
} else {
// Unwind MBBI to point to first LDR / FLDD.
const unsigned *CSRegs = getCalleeSavedRegs();
@@ -1412,7 +1305,8 @@ void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
// Reset SP based on frame pointer only if the stack frame extends beyond
// frame pointer stack slot or target is ELF and the function has FP.
if (NumBytes)
emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, FramePtr, -NumBytes, TII);
emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, FramePtr, -NumBytes,
TII, *this);
else
BuildMI(MBB, MBBI, TII.get(ARM::tMOVr), ARM::SP).addReg(FramePtr);
} else {
@@ -1420,9 +1314,9 @@ void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
&MBB.front() != MBBI &&
prior(MBBI)->getOpcode() == ARM::tPOP) {
MachineBasicBlock::iterator PMBBI = prior(MBBI);
emitSPUpdate(MBB, PMBBI, NumBytes, ARMCC::AL, 0, isThumb, TII);
emitSPUpdate(MBB, PMBBI, NumBytes, ARMCC::AL, 0, isThumb, TII, *this);
} else
emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, isThumb, TII);
emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, isThumb, TII, *this);
}
} else {
// Darwin ABI requires FP to point to the stack slot that contains the
@@ -1443,23 +1337,23 @@ void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
BuildMI(MBB, MBBI, TII.get(ARM::MOVr), ARM::SP).addReg(FramePtr)
.addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
} else if (NumBytes) {
emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, false, TII);
emitSPUpdate(MBB, MBBI, NumBytes, ARMCC::AL, 0, false, TII, *this);
}
// Move SP to start of integer callee save spill area 2.
movePastCSLoadStoreOps(MBB, MBBI, ARM::FLDD, 3, STI);
emitSPUpdate(MBB, MBBI, AFI->getDPRCalleeSavedAreaSize(), ARMCC::AL, 0,
false, TII);
false, TII, *this);
// Move SP to start of integer callee save spill area 1.
movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, 2, STI);
emitSPUpdate(MBB, MBBI, AFI->getGPRCalleeSavedArea2Size(), ARMCC::AL, 0,
false, TII);
false, TII, *this);
// Move SP to SP upon entry to the function.
movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, 1, STI);
emitSPUpdate(MBB, MBBI, AFI->getGPRCalleeSavedArea1Size(), ARMCC::AL, 0,
false, TII);
false, TII, *this);
}
}
@@ -1469,7 +1363,7 @@ void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
// FIXME: Verify this is still ok when R3 is no longer being reserved.
BuildMI(MBB, MBBI, TII.get(ARM::tPOP)).addReg(ARM::R3);
emitSPUpdate(MBB, MBBI, VARegSaveSize, ARMCC::AL, 0, isThumb, TII);
emitSPUpdate(MBB, MBBI, VARegSaveSize, ARMCC::AL, 0, isThumb, TII, *this);
if (isThumb) {
BuildMI(MBB, MBBI, TII.get(ARM::tBX_RET_vararg)).addReg(ARM::R3);

lib/Target/ARM/ARMRegisterInfo.h

@@ -40,19 +40,6 @@ public:
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, const MachineInstr *Orig) const;
MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
bool canFoldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops) const;
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const TargetRegisterClass* const*
@@ -90,6 +77,8 @@ public:
unsigned getEHHandlerRegister() const;
int getDwarfRegNum(unsigned RegNum, bool isEH) const;
bool isLowRegister(unsigned Reg) const;
};
} // end namespace llvm

lib/Target/Alpha/AlphaInstrInfo.cpp

@@ -250,6 +250,43 @@ void AlphaInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
NewMIs.push_back(MIB);
}
MachineInstr *AlphaInstrInfo::foldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
if (Ops.size() != 1) return NULL;
// Make sure this is a reg-reg copy.
unsigned Opc = MI->getOpcode();
MachineInstr *NewMI = NULL;
switch(Opc) {
default:
break;
case Alpha::BISr:
case Alpha::CPYSS:
case Alpha::CPYST:
if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
if (Ops[0] == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
Opc = (Opc == Alpha::BISr) ? Alpha::STQ :
((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);
NewMI = BuildMI(get(Opc)).addReg(InReg).addFrameIndex(FrameIndex)
.addReg(Alpha::F31);
} else { // load -> move
unsigned OutReg = MI->getOperand(0).getReg();
Opc = (Opc == Alpha::BISr) ? Alpha::LDQ :
((Opc == Alpha::CPYSS) ? Alpha::LDS : Alpha::LDT);
NewMI = BuildMI(get(Opc), OutReg).addFrameIndex(FrameIndex)
.addReg(Alpha::F31);
}
}
break;
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
static unsigned AlphaRevCondCode(unsigned Opcode) {
switch (Opcode) {
case Alpha::BEQ: return Alpha::BNE;

lib/Target/Alpha/AlphaInstrInfo.h

@@ -66,6 +66,17 @@ public:
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
bool AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
std::vector<MachineOperand> &Cond) const;

lib/Target/Alpha/AlphaRegisterInfo.cpp

@@ -58,43 +58,6 @@ AlphaRegisterInfo::AlphaRegisterInfo(const TargetInstrInfo &tii)
{
}
MachineInstr *AlphaRegisterInfo::foldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
if (Ops.size() != 1) return NULL;
// Make sure this is a reg-reg copy.
unsigned Opc = MI->getOpcode();
MachineInstr *NewMI = NULL;
switch(Opc) {
default:
break;
case Alpha::BISr:
case Alpha::CPYSS:
case Alpha::CPYST:
if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
if (Ops[0] == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
Opc = (Opc == Alpha::BISr) ? Alpha::STQ :
((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);
NewMI = BuildMI(TII.get(Opc)).addReg(InReg).addFrameIndex(FrameIndex)
.addReg(Alpha::F31);
} else { // load -> move
unsigned OutReg = MI->getOperand(0).getReg();
Opc = (Opc == Alpha::BISr) ? Alpha::LDQ :
((Opc == Alpha::CPYSS) ? Alpha::LDS : Alpha::LDT);
NewMI = BuildMI(TII.get(Opc), OutReg).addFrameIndex(FrameIndex)
.addReg(Alpha::F31);
}
}
break;
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
void AlphaRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg,

lib/Target/Alpha/AlphaRegisterInfo.h

@@ -28,16 +28,6 @@ struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
AlphaRegisterInfo(const TargetInstrInfo &tii);
/// Code Generation virtual methods...
MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, const MachineInstr *Orig) const;

lib/Target/CellSPU/SPUInstrInfo.cpp

@@ -388,3 +388,42 @@ void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
}
}
/// foldMemoryOperand - SPU, like PPC, can only fold spills into
/// copy instructions, turning them into load/store instructions.
MachineInstr *
SPUInstrInfo::foldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const
{
#if SOMEDAY_SCOTT_LOOKS_AT_ME_AGAIN
if (Ops.size() != 1) return NULL;
unsigned OpNum = Ops[0];
unsigned Opc = MI->getOpcode();
MachineInstr *NewMI = 0;
if ((Opc == SPU::ORr32
|| Opc == SPU::ORv4i32)
&& MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
NewMI = addFrameReference(BuildMI(get(SPU::STQDr32)).addReg(InReg),
FrameIndex);
}
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset()) ? SPU::LQDr32 : SPU::LQXr32;
NewMI = addFrameReference(BuildMI(get(Opc), OutReg), FrameIndex);
}
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
#else
return 0;
#endif
}
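The maxFrameOffset test selects between SPU's two quadword addressing forms; a short note inferred from the opcode names, with the caveat that the whole block is compiled out by the #if guard above:

    // STQD / LQD : d-form, immediate offset from the base register, usable
    //              while the frame slot is within d-form range
    // STQX / LQX : x-form, register-indexed, for slots beyond that range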

lib/Target/CellSPU/SPUInstrInfo.h

@@ -74,7 +74,19 @@ namespace llvm {
virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
//! Fold spills into load/store instructions
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
//! Fold any load/store to an operand
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
};
}

lib/Target/CellSPU/SPURegisterInfo.cpp

@@ -295,54 +295,6 @@ BitVector SPURegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
/// foldMemoryOperand - SPU, like PPC, can only fold spills into
/// copy instructions, turning them into load/store instructions.
MachineInstr *
SPURegisterInfo::foldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const
{
#if SOMEDAY_SCOTT_LOOKS_AT_ME_AGAIN
if (Ops.size() != 1) return NULL;
unsigned OpNum = Ops[0];
unsigned Opc = MI->getOpcode();
MachineInstr *NewMI = 0;
if ((Opc == SPU::ORr32
|| Opc == SPU::ORv4i32)
&& MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
NewMI = addFrameReference(BuildMI(TII.get(SPU::STQDr32)).addReg(InReg),
FrameIndex);
}
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset()) ? SPU::LQDr32 : SPU::LQXr32;
NewMI = addFrameReference(BuildMI(TII.get(Opc), OutReg), FrameIndex);
}
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
#else
return 0;
#endif
}
/// General-purpose load/store fold to operand code
MachineInstr *
SPURegisterInfo::foldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr *LoadMI) const
{
return 0;
}
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

lib/Target/CellSPU/SPURegisterInfo.h

@@ -42,16 +42,6 @@ namespace llvm {
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, const MachineInstr *Orig) const;
//! Fold spills into load/store instructions
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
//! Fold any load/store to an operand
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const;
//! Return the array of callee-saved registers
virtual const unsigned* getCalleeSavedRegs(const MachineFunction *MF) const;

lib/Target/Mips/MipsInstrInfo.cpp

@@ -371,6 +371,37 @@ void MipsInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
return;
}
MachineInstr *MipsInstrInfo::
foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops, int FI) const
{
if (Ops.size() != 1) return NULL;
MachineInstr *NewMI = NULL;
switch (MI->getOpcode())
{
case Mips::ADDu:
if ((MI->getOperand(0).isRegister()) &&
(MI->getOperand(1).isRegister()) &&
(MI->getOperand(1).getReg() == Mips::ZERO) &&
(MI->getOperand(2).isRegister()))
{
if (Ops[0] == 0) // COPY -> STORE
NewMI = BuildMI(get(Mips::SW)).addFrameIndex(FI)
.addImm(0).addReg(MI->getOperand(2).getReg());
else // COPY -> LOAD
NewMI = BuildMI(get(Mips::LW), MI->getOperand(0)
.getReg()).addImm(0).addFrameIndex(FI);
}
break;
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
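Only ADDu is inspected because MIPS spells a register copy as an add against the hardwired zero register. A note on the idiom the operand checks above match (illustrative):

    // MIPS copy idiom recognized above:
    //   ADDu rd, ZERO, rs            // rd = 0 + rs, i.e. a plain copy
    // Folding the def (Ops[0] == 0):  SW rs, <frame index>   copy -> store
    // Folding the use (otherwise):    LW rd, <frame index>   copy -> load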
unsigned MipsInstrInfo::
RemoveBranch(MachineBasicBlock &MBB) const
{

lib/Target/Mips/MipsInstrInfo.h

@@ -105,6 +105,17 @@ public:
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
virtual bool ReverseBranchCondition(std::vector<MachineOperand> &Cond) const;

lib/Target/Mips/MipsRegisterInfo.cpp

@@ -93,37 +93,6 @@ void MipsRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
MBB.insert(I, MI);
}
MachineInstr *MipsRegisterInfo::
foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops, int FI) const
{
if (Ops.size() != 1) return NULL;
MachineInstr *NewMI = NULL;
switch (MI->getOpcode())
{
case Mips::ADDu:
if ((MI->getOperand(0).isRegister()) &&
(MI->getOperand(1).isRegister()) &&
(MI->getOperand(1).getReg() == Mips::ZERO) &&
(MI->getOperand(2).isRegister()))
{
if (Ops[0] == 0) // COPY -> STORE
NewMI = BuildMI(TII.get(Mips::SW)).addFrameIndex(FI)
.addImm(0).addReg(MI->getOperand(2).getReg());
else // COPY -> LOAD
NewMI = BuildMI(TII.get(Mips::LW), MI->getOperand(0)
.getReg()).addImm(0).addFrameIndex(FI);
}
break;
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
//===----------------------------------------------------------------------===//
//
// Callee Saved Registers methods

lib/Target/Mips/MipsRegisterInfo.h

@@ -35,16 +35,6 @@ struct MipsRegisterInfo : public MipsGenRegisterInfo {
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, const MachineInstr *Orig) const;
MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
const TargetRegisterClass* const*

lib/Target/PowerPC/PPCInstrInfo.cpp

@@ -534,6 +534,85 @@ void PPCInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
return;
}
/// foldMemoryOperand - PowerPC (like most RISCs) can only fold spills into
/// copy instructions, turning them into load/store instructions.
MachineInstr *PPCInstrInfo::foldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
if (Ops.size() != 1) return NULL;
// Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
// it takes more than one instruction to store it.
unsigned Opc = MI->getOpcode();
unsigned OpNum = Ops[0];
MachineInstr *NewMI = NULL;
if ((Opc == PPC::OR &&
MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
NewMI = addFrameReference(BuildMI(get(PPC::STW)).addReg(InReg),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
NewMI = addFrameReference(BuildMI(get(PPC::LWZ), OutReg),
FrameIndex);
}
} else if ((Opc == PPC::OR8 &&
MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
NewMI = addFrameReference(BuildMI(get(PPC::STD)).addReg(InReg),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
NewMI = addFrameReference(BuildMI(get(PPC::LD), OutReg), FrameIndex);
}
} else if (Opc == PPC::FMRD) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
NewMI = addFrameReference(BuildMI(get(PPC::STFD)).addReg(InReg),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
NewMI = addFrameReference(BuildMI(get(PPC::LFD), OutReg), FrameIndex);
}
} else if (Opc == PPC::FMRS) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
NewMI = addFrameReference(BuildMI(get(PPC::STFS)).addReg(InReg),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
NewMI = addFrameReference(BuildMI(get(PPC::LFS), OutReg), FrameIndex);
}
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
bool PPCInstrInfo::canFoldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops) const {
if (Ops.size() != 1) return false;
// Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
// it takes more than one instruction to store it.
unsigned Opc = MI->getOpcode();
if ((Opc == PPC::OR &&
MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
return true;
else if ((Opc == PPC::OR8 &&
MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
return true;
else if (Opc == PPC::FMRD || Opc == PPC::FMRS)
return true;
return false;
}
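Both hooks key on the same idiom: PowerPC's mr (move register) mnemonic is an OR whose two source operands are the same register, hence the operand(1) == operand(2) guards. Summarizing the cases above:

    // PPC copies recognized for folding:
    //   OR   rD, rS, rS    // extended mnemonic "mr rD, rS", 32-bit GPR
    //   OR8  rD, rS, rS    // 64-bit GPR copy
    //   FMRS / FMRD        // single / double FP register moves
    // MCRF (condition-register moves) is excluded: spilling a CR field
    // takes more than one instruction.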
bool PPCInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;

lib/Target/PowerPC/PPCInstrInfo.h

@@ -129,6 +129,21 @@ public:
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
/// foldMemoryOperand - PowerPC (like most RISCs) can only fold spills into
/// copy instructions, turning them into load/store instructions.
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
virtual bool canFoldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops) const;
virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
virtual bool ReverseBranchCondition(std::vector<MachineOperand> &Cond) const;
};

lib/Target/PowerPC/PPCRegisterInfo.cpp

@@ -298,85 +298,6 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
/// foldMemoryOperand - PowerPC (like most RISCs) can only fold spills into
/// copy instructions, turning them into load/store instructions.
MachineInstr *PPCRegisterInfo::foldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
if (Ops.size() != 1) return NULL;
// Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
// it takes more than one instruction to store it.
unsigned Opc = MI->getOpcode();
unsigned OpNum = Ops[0];
MachineInstr *NewMI = NULL;
if ((Opc == PPC::OR &&
MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
NewMI = addFrameReference(BuildMI(TII.get(PPC::STW)).addReg(InReg),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
NewMI = addFrameReference(BuildMI(TII.get(PPC::LWZ), OutReg),
FrameIndex);
}
} else if ((Opc == PPC::OR8 &&
MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
NewMI = addFrameReference(BuildMI(TII.get(PPC::STD)).addReg(InReg),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
NewMI = addFrameReference(BuildMI(TII.get(PPC::LD), OutReg), FrameIndex);
}
} else if (Opc == PPC::FMRD) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
NewMI = addFrameReference(BuildMI(TII.get(PPC::STFD)).addReg(InReg),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
NewMI = addFrameReference(BuildMI(TII.get(PPC::LFD), OutReg), FrameIndex);
}
} else if (Opc == PPC::FMRS) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
NewMI = addFrameReference(BuildMI(TII.get(PPC::STFS)).addReg(InReg),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
NewMI = addFrameReference(BuildMI(TII.get(PPC::LFS), OutReg), FrameIndex);
}
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
bool PPCRegisterInfo::canFoldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops) const {
if (Ops.size() != 1) return false;
// Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
// it takes more than one instruction to store it.
unsigned Opc = MI->getOpcode();
if ((Opc == PPC::OR &&
MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
return true;
else if ((Opc == PPC::OR8 &&
MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
return true;
else if (Opc == PPC::FMRD || Opc == PPC::FMRS)
return true;
return false;
}
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

lib/Target/PowerPC/PPCRegisterInfo.h

@@ -38,21 +38,6 @@ public:
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, const MachineInstr *Orig) const;
/// foldMemoryOperand - PowerPC (like most RISCs) can only fold spills into
/// copy instructions, turning them into load/store instructions.
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
virtual bool canFoldMemoryOperand(MachineInstr *MI,
SmallVectorImpl<unsigned> &Ops) const;
const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
const TargetRegisterClass* const*

lib/Target/Sparc/SparcInstrInfo.cpp

@@ -221,3 +221,41 @@ void SparcInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
NewMIs.push_back(MIB);
return;
}
MachineInstr *SparcInstrInfo::foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FI) const {
if (Ops.size() != 1) return NULL;
unsigned OpNum = Ops[0];
bool isFloat = false;
MachineInstr *NewMI = NULL;
switch (MI->getOpcode()) {
case SP::ORrr:
if (MI->getOperand(1).isRegister() && MI->getOperand(1).getReg() == SP::G0 &&
MI->getOperand(0).isRegister() && MI->getOperand(2).isRegister()) {
if (OpNum == 0) // COPY -> STORE
NewMI = BuildMI(get(SP::STri)).addFrameIndex(FI).addImm(0)
.addReg(MI->getOperand(2).getReg());
else // COPY -> LOAD
NewMI = BuildMI(get(SP::LDri), MI->getOperand(0).getReg())
.addFrameIndex(FI).addImm(0);
}
break;
case SP::FMOVS:
isFloat = true;
// FALLTHROUGH
case SP::FMOVD:
if (OpNum == 0) // COPY -> STORE
NewMI = BuildMI(get(isFloat ? SP::STFri : SP::STDFri))
.addFrameIndex(FI).addImm(0).addReg(MI->getOperand(1).getReg());
else // COPY -> LOAD
NewMI = BuildMI(get(isFloat ? SP::LDFri : SP::LDDFri),
MI->getOperand(0).getReg()).addFrameIndex(FI).addImm(0);
break;
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
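Sparc follows the same pattern: an integer copy is an OR against the hardwired %g0, and FMOVS falls through to share the FP path with FMOVD via the isFloat flag. Summarizing the switch above:

    // SP::ORrr with operand 1 == G0 :  rd = %g0 | rs, a register copy;
    //   fold the def -> STri (store to the slot), the use -> LDri (load)
    // SP::FMOVS / SP::FMOVD         :  FP copies -> STFri/STDFri, LDFri/LDDFri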

lib/Target/Sparc/SparcInstrInfo.h

@@ -93,6 +93,16 @@ public:
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
};
}

lib/Target/Sparc/SparcRegisterInfo.cpp

@@ -39,44 +39,6 @@ void SparcRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
MBB.insert(I, MI);
}
MachineInstr *SparcRegisterInfo::foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FI) const {
if (Ops.size() != 1) return NULL;
unsigned OpNum = Ops[0];
bool isFloat = false;
MachineInstr *NewMI = NULL;
switch (MI->getOpcode()) {
case SP::ORrr:
if (MI->getOperand(1).isRegister() && MI->getOperand(1).getReg() == SP::G0 &&
MI->getOperand(0).isRegister() && MI->getOperand(2).isRegister()) {
if (OpNum == 0) // COPY -> STORE
NewMI = BuildMI(TII.get(SP::STri)).addFrameIndex(FI).addImm(0)
.addReg(MI->getOperand(2).getReg());
else // COPY -> LOAD
NewMI = BuildMI(TII.get(SP::LDri), MI->getOperand(0).getReg())
.addFrameIndex(FI).addImm(0);
}
break;
case SP::FMOVS:
isFloat = true;
// FALLTHROUGH
case SP::FMOVD:
if (OpNum == 0) // COPY -> STORE
NewMI = BuildMI(TII.get(isFloat ? SP::STFri : SP::STDFri))
.addFrameIndex(FI).addImm(0).addReg(MI->getOperand(1).getReg());
else // COPY -> LOAD
NewMI = BuildMI(TII.get(isFloat ? SP::LDFri : SP::LDDFri),
MI->getOperand(0).getReg()).addFrameIndex(FI).addImm(0);
break;
}
if (NewMI)
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
const unsigned* SparcRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
const {
static const unsigned CalleeSavedRegs[] = { 0 };

lib/Target/Sparc/SparcRegisterInfo.h

@@ -33,16 +33,6 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, const MachineInstr *Orig) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const TargetRegisterClass* const* getCalleeSavedRegClasses(

lib/Target/X86/X86InstrInfo.cpp (diff suppressed because it is too large)

lib/Target/X86/X86InstrInfo.h

@@ -225,6 +225,19 @@ namespace X86II {
class X86InstrInfo : public TargetInstrInfoImpl {
X86TargetMachine &TM;
const X86RegisterInfo RI;
/// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
/// RegOp2MemOpTable2 - Load / store folding opcode maps.
///
DenseMap<unsigned*, unsigned> RegOp2MemOpTable2Addr;
DenseMap<unsigned*, unsigned> RegOp2MemOpTable0;
DenseMap<unsigned*, unsigned> RegOp2MemOpTable1;
DenseMap<unsigned*, unsigned> RegOp2MemOpTable2;
/// MemOp2RegOpTable - Load / store unfolding opcode map.
///
DenseMap<unsigned*, std::pair<unsigned, unsigned> > MemOp2RegOpTable;
public:
X86InstrInfo(X86TargetMachine &tm);
@@ -305,6 +318,44 @@ public:
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI) const;
/// foldMemoryOperand - If this target supports it, fold a load or store of
/// the specified stack slot into the specified machine instruction for the
/// specified operand(s). If this is possible, a new instruction is returned
/// with the specified operand folded; otherwise NULL is returned. The client
/// is responsible for removing the old instruction and adding the new one
/// in the instruction stream.
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const;
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
virtual bool canFoldMemoryOperand(MachineInstr*, SmallVectorImpl<unsigned> &) const;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
/// possible, returns true as well as the new instructions by reference.
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const;
/// getOpcodeAfterMemoryUnfold - Returns the opcode of the would-be new
/// instruction after load / store are unfolded from an instruction of the
/// specified opcode. It returns zero if the specified unfolding is not
/// possible.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore) const;
virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
virtual bool ReverseBranchCondition(std::vector<MachineOperand> &Cond) const;
@@ -319,6 +370,11 @@ public:
unsigned char getBaseOpcodeFor(MachineOpCode Opcode) const {
return getBaseOpcodeFor(&get(Opcode));
}
private:
MachineInstr* foldMemoryOperand(MachineInstr* MI,
unsigned OpNum,
SmallVector<MachineOperand,4> &MOs) const;
};
} // End llvm namespace
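The DenseMaps above drive X86's table-based folding: each register-form opcode maps to its memory-form opcode, and MemOp2RegOpTable inverts the mapping for unfolding. The tables are populated in X86InstrInfo.cpp, whose diff is suppressed above. A hedged sketch of a lookup; the cast-to-pointer key encoding is an assumption read off the DenseMap<unsigned*, unsigned> signature:

    // Fold a memory operand into operand 1 of MI, if a memory form exists.
    DenseMap<unsigned*, unsigned>::iterator I =
        RegOp2MemOpTable1.find((unsigned*)MI->getOpcode());
    if (I != RegOp2MemOpTable1.end()) {
      unsigned MemOpc = I->second;   // memory-operand form of the opcode
      // ... build the replacement instruction with MemOpc and the slot ...
    }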

lib/Target/X86/X86RegisterInfo.cpp (diff suppressed because it is too large)

lib/Target/X86/X86RegisterInfo.h

@@ -66,18 +66,6 @@ private:
///
unsigned FramePtr;
/// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
/// RegOp2MemOpTable2 - Load / store folding opcode maps.
///
DenseMap<unsigned*, unsigned> RegOp2MemOpTable2Addr;
DenseMap<unsigned*, unsigned> RegOp2MemOpTable0;
DenseMap<unsigned*, unsigned> RegOp2MemOpTable1;
DenseMap<unsigned*, unsigned> RegOp2MemOpTable2;
/// MemOp2RegOpTable - Load / store unfolding opcode map.
///
DenseMap<unsigned*, std::pair<unsigned, unsigned> > MemOp2RegOpTable;
public:
X86RegisterInfo(X86TargetMachine &tm, const TargetInstrInfo &tii);
@@ -99,44 +87,6 @@ public:
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, const MachineInstr *Orig) const;
/// foldMemoryOperand - If this target supports it, fold a load or store of
/// the specified stack slot into the specified machine instruction for the
/// specified operand(s). If this is possible, a new instruction is returned
/// with the specified operand folded; otherwise NULL is returned. The client
/// is responsible for removing the old instruction and adding the new one
/// in the instruction stream.
MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr* foldMemoryOperand(MachineInstr* MI,
SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const;
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool canFoldMemoryOperand(MachineInstr*, SmallVectorImpl<unsigned> &) const;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
/// possible, returns true as well as the new instructions by reference.
bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const;
/// getOpcodeAfterMemoryUnfold - Returns the opcode of the would-be new
/// instruction after load / store are unfolded from an instruction of the
/// specified opcode. It returns zero if the specified unfolding is not
/// possible.
unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore) const;
/// getCalleeSavedRegs - Return a null-terminated list of all of the
/// callee-save registers on this target.
const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
@@ -177,11 +127,6 @@ public:
// Exception handling queries.
unsigned getEHExceptionRegister() const;
unsigned getEHHandlerRegister() const;
private:
MachineInstr* foldMemoryOperand(MachineInstr* MI,
unsigned OpNum,
SmallVector<MachineOperand,4> &MOs) const;
};
// getX86SubSuperRegister - X86 utility function. It returns the sub or super