RISC architectures get their memory operand folding for free.

The only folding a load/store architecture can do is converting a COPY into a
load or store, and the target-independent part of foldMemoryOperand already
knows how to do that.
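
For reference, the generic COPY folding rewrites the copy as a call to the
storeRegToStackSlot or loadRegFromStackSlot hook that every target already
implements. A minimal sketch of that logic, assuming those hooks and a
hypothetical helper name (the code in the tree differs in detail):

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetInstrInfo.h"
    using namespace llvm;

    // Sketch only: fold a COPY by turning it into a store to (or load from)
    // the stack slot FI.  OpNum is the index of the operand being folded.
    static MachineInstr *foldCopyToLoadStore(const TargetInstrInfo &TII,
                                             MachineBasicBlock &MBB,
                                             MachineInstr *MI, unsigned OpNum,
                                             int FI,
                                             const TargetRegisterClass *RC,
                                             const TargetRegisterInfo *TRI) {
      if (!MI->isCopy())
        return NULL;                    // only plain register copies qualify
      MachineBasicBlock::iterator Pos = MI;
      if (OpNum == 0) {
        // Folding the def: the copy's source is stored straight to the slot.
        const MachineOperand &Src = MI->getOperand(1);
        TII.storeRegToStackSlot(MBB, Pos, Src.getReg(), Src.isKill(), FI, RC, TRI);
      } else {
        // Folding the use: the copy's def is reloaded straight from the slot.
        const MachineOperand &Dst = MI->getOperand(0);
        TII.loadRegFromStackSlot(MBB, Pos, Dst.getReg(), FI, RC, TRI);
      }
      return &*--Pos;                   // the new load/store replaces the COPY
    }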

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@108099 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Jakob Stoklund Olesen
Date:   2010-07-11 19:19:13 +00:00
parent a66450d227
commit 600f171486

16 changed files with 0 additions and 812 deletions

lib/Target/Mips/MipsInstrInfo.cpp

@@ -261,80 +261,6 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    llvm_unreachable("Register class not handled!");
}

MachineInstr *MipsInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF,
                      MachineInstr* MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const
{
  if (Ops.size() != 1) return NULL;

  MachineInstr *NewMI = NULL;

  switch (MI->getOpcode()) {
  case Mips::ADDu:
    if ((MI->getOperand(0).isReg()) &&
        (MI->getOperand(1).isReg()) &&
        (MI->getOperand(1).getReg() == Mips::ZERO) &&
        (MI->getOperand(2).isReg())) {
      if (Ops[0] == 0) {    // COPY -> STORE
        unsigned SrcReg = MI->getOperand(2).getReg();
        bool isKill = MI->getOperand(2).isKill();
        bool isUndef = MI->getOperand(2).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(Mips::SW))
          .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
          .addImm(0).addFrameIndex(FI);
      } else {              // COPY -> LOAD
        unsigned DstReg = MI->getOperand(0).getReg();
        bool isDead = MI->getOperand(0).isDead();
        bool isUndef = MI->getOperand(0).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(Mips::LW))
          .addReg(DstReg, RegState::Define | getDeadRegState(isDead) |
                  getUndefRegState(isUndef))
          .addImm(0).addFrameIndex(FI);
      }
    }
    break;
  case Mips::FMOV_S32:
  case Mips::FMOV_D32:
    if ((MI->getOperand(0).isReg()) &&
        (MI->getOperand(1).isReg())) {
      const TargetRegisterClass
        *RC = RI.getRegClass(MI->getOperand(0).getReg());
      unsigned StoreOpc, LoadOpc;
      bool IsMips1 = TM.getSubtarget<MipsSubtarget>().isMips1();

      if (RC == Mips::FGR32RegisterClass) {
        LoadOpc = Mips::LWC1; StoreOpc = Mips::SWC1;
      } else {
        assert(RC == Mips::AFGR64RegisterClass);
        // Mips1 doesn't have ldc/sdc instructions.
        if (IsMips1) break;
        LoadOpc = Mips::LDC1; StoreOpc = Mips::SDC1;
      }

      if (Ops[0] == 0) {    // COPY -> STORE
        unsigned SrcReg = MI->getOperand(1).getReg();
        bool isKill = MI->getOperand(1).isKill();
        bool isUndef = MI->getOperand(1).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(StoreOpc))
          .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
          .addImm(0).addFrameIndex(FI);
      } else {              // COPY -> LOAD
        unsigned DstReg = MI->getOperand(0).getReg();
        bool isDead = MI->getOperand(0).isDead();
        bool isUndef = MI->getOperand(0).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(LoadOpc))
          .addReg(DstReg, RegState::Define | getDeadRegState(isDead) |
                  getUndefRegState(isUndef))
          .addImm(0).addFrameIndex(FI);
      }
    }
    break;
  }

  return NewMI;
}
//===----------------------------------------------------------------------===//
// Branch Analysis
//===----------------------------------------------------------------------===//
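
The hook removed above duplicated logic that already lives a few lines up in
this same file: the generic COPY folding ends up calling
storeRegToStackSlot/loadRegFromStackSlot, and those already pick SW/SWC1/SDC1
(or LW/LWC1/LDC1) by register class. Roughly, and abridged (this is the
approximate shape of MipsInstrInfo::storeRegToStackSlot at this revision, not
a verbatim copy; the Mips1 double-precision expansion is omitted):

    void MipsInstrInfo::
    storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        unsigned SrcReg, bool isKill, int FI,
                        const TargetRegisterClass *RC,
                        const TargetRegisterInfo *TRI) const {
      DebugLoc DL;
      if (I != MBB.end()) DL = I->getDebugLoc();

      unsigned Opc;
      if (RC == Mips::CPURegsRegisterClass)
        Opc = Mips::SW;
      else if (RC == Mips::FGR32RegisterClass)
        Opc = Mips::SWC1;
      else if (RC == Mips::AFGR64RegisterClass)
        Opc = Mips::SDC1;           // Mips1 needs a two-SWC1 expansion instead
      else
        llvm_unreachable("Register class not handled!");

      BuildMI(MBB, I, DL, get(Opc))
        .addReg(SrcReg, getKillRegState(isKill))
        .addImm(0).addFrameIndex(FI);
    }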

lib/Target/Mips/MipsInstrInfo.h

@@ -222,18 +222,6 @@ public:
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const;

  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                              MachineInstr* MI,
                                              const SmallVectorImpl<unsigned> &Ops,
                                              int FrameIndex) const;

  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                              MachineInstr* MI,
                                              const SmallVectorImpl<unsigned> &Ops,
                                              MachineInstr* LoadMI) const {
    return 0;
  }

  virtual
  bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;