mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-04-12 07:37:34 +00:00
RISC architectures get their memory operand folding for free.
The only folding these load/store architectures can do is converting COPY into a load or store, and the target independent part of foldMemoryOperand already knows how to do that. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@108099 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
a66450d227
commit
600f171486
@ -949,223 +949,6 @@ ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
|
||||
return &*MIB;
|
||||
}
|
||||
|
||||
/// foldMemoryOperandImpl - Attempt to fold a register copy into a load from /
/// store to frame index FI.  Only plain register-to-register moves are
/// handled; Ops names the single copy operand being spilled/reloaded
/// (0 = the def, i.e. the copy becomes a store of its source; otherwise the
/// copy becomes a load of its destination).  Returns the new instruction, or
/// NULL if this copy cannot be folded.
MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  // Only single-operand folds are supported.
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
    // If it is updating CPSR, then it cannot be folded.
    if (MI->getOperand(4).getReg() == ARM::CPSR && !MI->getOperand(4).isDead())
      return NULL;
    // Carry the predicate of the copy over to the new memory instruction.
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else { // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      if (Opc == ARM::MOVr)
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef), DstSubReg)
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      else // ARM::t2MOVr
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef), DstSubReg)
          .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::tMOVgpr2gpr ||
             Opc == ARM::tMOVtgpr2gpr ||
             Opc == ARM::tMOVgpr2tgpr) {
    // Thumb2 GPR copies: always lower to the t2 i12-offset forms, with an
    // unconditional (AL) predicate.
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
        .addReg(SrcReg,
                getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    } else { // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
    }
  } else if (Opc == ARM::VMOVS) {
    // Single-precision VFP copy -> VSTRS / VLDRS.
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRS))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else { // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRS))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::VMOVD || Opc == ARM::VMOVDneon) {
    // Double-precision copy -> VSTRD / VLDRD.
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRD))
        .addReg(SrcReg,
                getKillRegState(isKill) | getUndefRegState(isUndef),
                SrcSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else { // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRD))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef),
                DstSubReg)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  } else if (Opc == ARM::VMOVQ) {
    // 128-bit NEON copy.  If the slot is 16-byte aligned (or can be realigned)
    // use the aligned VST1q/VLD1q forms; otherwise fall back to VSTMQ/VLDMQ.
    MachineFrameInfo &MFI = *MF.getFrameInfo();
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      unsigned SrcSubReg = MI->getOperand(1).getSubReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      if (MFI.getObjectAlignment(FI) >= 16 &&
          getRegisterInfo().canRealignStack(MF)) {
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VST1q))
          .addFrameIndex(FI).addImm(16)
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addImm(Pred).addReg(PredReg);
      } else {
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTMQ))
          .addReg(SrcReg,
                  getKillRegState(isKill) | getUndefRegState(isUndef),
                  SrcSubReg)
          .addFrameIndex(FI).addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
          .addImm(Pred).addReg(PredReg);
      }
    } else { // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      unsigned DstSubReg = MI->getOperand(0).getSubReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      if (MFI.getObjectAlignment(FI) >= 16 &&
          getRegisterInfo().canRealignStack(MF)) {
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLD1q))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef),
                  DstSubReg)
          .addFrameIndex(FI).addImm(16).addImm(Pred).addReg(PredReg);
      } else {
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDMQ))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef),
                  DstSubReg)
          .addFrameIndex(FI).addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
          .addImm(Pred).addReg(PredReg);
      }
    }
  }

  return NewMI;
}
|
||||
|
||||
/// foldMemoryOperandImpl - Variant that would fold LoadMI directly into MI.
/// Not implemented for ARM; only the frame-index form of folding is
/// supported, so this always declines.
MachineInstr *
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr *LoadMI) const {
  // FIXME: implement load folding if it ever becomes profitable.
  return 0;
}
|
||||
|
||||
bool
|
||||
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops) const {
|
||||
if (Ops.size() != 1) return false;
|
||||
|
||||
unsigned Opc = MI->getOpcode();
|
||||
if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
|
||||
// If it is updating CPSR, then it cannot be folded.
|
||||
return MI->getOperand(4).getReg() != ARM::CPSR ||
|
||||
MI->getOperand(4).isDead();
|
||||
} else if (Opc == ARM::tMOVgpr2gpr ||
|
||||
Opc == ARM::tMOVtgpr2gpr ||
|
||||
Opc == ARM::tMOVgpr2tgpr) {
|
||||
return true;
|
||||
} else if (Opc == ARM::VMOVS || Opc == ARM::VMOVD ||
|
||||
Opc == ARM::VMOVDneon || Opc == ARM::VMOVQ) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// FIXME: VMOVQQ and VMOVQQQQ?
|
||||
|
||||
return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
|
||||
}
|
||||
|
||||
/// Create a copy of a const pool value. Update CPI to the new index and return
|
||||
/// the label UID.
|
||||
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
|
||||
|
@ -296,19 +296,6 @@ public:
|
||||
const MDNode *MDPtr,
|
||||
DebugLoc DL) const;
|
||||
|
||||
virtual bool canFoldMemoryOperand(const MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
MachineInstr* LoadMI) const;
|
||||
|
||||
virtual void reMaterialize(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI,
|
||||
unsigned DestReg, unsigned SubIdx,
|
||||
|
@ -53,39 +53,6 @@ void Thumb1InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
|
||||
"Thumb1 can only copy GPR registers");
|
||||
}
|
||||
|
||||
bool Thumb1InstrInfo::
|
||||
canFoldMemoryOperand(const MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops) const {
|
||||
if (Ops.size() != 1) return false;
|
||||
|
||||
unsigned OpNum = Ops[0];
|
||||
unsigned Opc = MI->getOpcode();
|
||||
switch (Opc) {
|
||||
default: break;
|
||||
case ARM::tMOVr:
|
||||
case ARM::tMOVtgpr2gpr:
|
||||
case ARM::tMOVgpr2tgpr:
|
||||
case ARM::tMOVgpr2gpr: {
|
||||
if (OpNum == 0) { // move -> store
|
||||
unsigned SrcReg = MI->getOperand(1).getReg();
|
||||
if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
|
||||
!isARMLowRegister(SrcReg))
|
||||
// tSpill cannot take a high register operand.
|
||||
return false;
|
||||
} else { // move -> load
|
||||
unsigned DstReg = MI->getOperand(0).getReg();
|
||||
if (TargetRegisterInfo::isPhysicalRegister(DstReg) &&
|
||||
!isARMLowRegister(DstReg))
|
||||
// tRestore cannot target a high register operand.
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void Thumb1InstrInfo::
|
||||
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
|
||||
unsigned SrcReg, bool isKill, int FI,
|
||||
@ -214,46 +181,3 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/// foldMemoryOperandImpl - Fold a Thumb1 register copy into a tSpill (when
/// the copy's def is the folded operand) or a tRestore (otherwise) against
/// frame index FI.  Returns NULL when the copy cannot be folded, e.g. when a
/// high physical register is involved.
MachineInstr *Thumb1InstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  // Only single-operand folds are supported.
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  switch (Opc) {
  default: break;
  case ARM::tMOVr:
  case ARM::tMOVtgpr2gpr:
  case ARM::tMOVgpr2tgpr:
  case ARM::tMOVgpr2gpr: {
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
          !isARMLowRegister(SrcReg))
        // tSpill cannot take a high register operand.
        break;
      NewMI = AddDefaultPred(BuildMI(MF, MI->getDebugLoc(), get(ARM::tSpill))
                             .addReg(SrcReg, getKillRegState(isKill))
                             .addFrameIndex(FI).addImm(0));
    } else { // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(DstReg) &&
          !isARMLowRegister(DstReg))
        // tRestore cannot target a high register operand.
        break;
      bool isDead = MI->getOperand(0).isDead();
      NewMI = AddDefaultPred(BuildMI(MF, MI->getDebugLoc(), get(ARM::tRestore))
                             .addReg(DstReg,
                                     RegState::Define | getDeadRegState(isDead))
                             .addFrameIndex(FI).addImm(0));
    }
    break;
  }
  }

  return NewMI;
}
|
||||
|
@ -62,20 +62,6 @@ public:
|
||||
const TargetRegisterClass *RC,
|
||||
const TargetRegisterInfo *TRI) const;
|
||||
|
||||
bool canFoldMemoryOperand(const MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops) const;
|
||||
|
||||
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const;
|
||||
|
||||
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
MachineInstr* LoadMI) const {
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -215,51 +215,6 @@ AlphaInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
|
||||
llvm_unreachable("Unhandled register class");
|
||||
}
|
||||
|
||||
/// foldMemoryOperandImpl - Fold an Alpha register copy (BISr for integer,
/// CPYSS/CPYST for float/double) into a store to or load from FrameIndex.
/// The copy is only recognized when both source operands name the same
/// register, which is how reg-reg moves are encoded on Alpha.
MachineInstr *AlphaInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                    MachineInstr *MI,
                                          const SmallVectorImpl<unsigned> &Ops,
                                                    int FrameIndex) const {
  if (Ops.size() != 1) return NULL;

  // Make sure this is a reg-reg copy.
  unsigned Opc = MI->getOpcode();

  MachineInstr *NewMI = NULL;
  switch(Opc) {
  default:
    break;
  case Alpha::BISr:
  case Alpha::CPYSS:
  case Alpha::CPYST:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      if (Ops[0] == 0) { // move -> store
        unsigned InReg = MI->getOperand(1).getReg();
        bool isKill = MI->getOperand(1).isKill();
        bool isUndef = MI->getOperand(1).isUndef();
        // Pick the store matching the copy's register class:
        // BISr -> STQ (int64), CPYSS -> STS (f32), CPYST -> STT (f64).
        Opc = (Opc == Alpha::BISr) ? Alpha::STQ :
          ((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
          .addReg(InReg, getKillRegState(isKill) | getUndefRegState(isUndef))
          .addFrameIndex(FrameIndex)
          .addReg(Alpha::F31);
      } else { // load -> move
        unsigned OutReg = MI->getOperand(0).getReg();
        bool isDead = MI->getOperand(0).isDead();
        bool isUndef = MI->getOperand(0).isUndef();
        // Matching loads: BISr -> LDQ, CPYSS -> LDS, CPYST -> LDT.
        Opc = (Opc == Alpha::BISr) ? Alpha::LDQ :
          ((Opc == Alpha::CPYSS) ? Alpha::LDS : Alpha::LDT);
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
          .addReg(OutReg, RegState::Define | getDeadRegState(isDead) |
                  getUndefRegState(isUndef))
          .addFrameIndex(FrameIndex)
          .addReg(Alpha::F31);
      }
    }
    break;
  }
  return NewMI;
}
|
||||
|
||||
static unsigned AlphaRevCondCode(unsigned Opcode) {
|
||||
switch (Opcode) {
|
||||
case Alpha::BEQ: return Alpha::BNE;
|
||||
|
@ -61,18 +61,6 @@ public:
|
||||
const TargetRegisterClass *RC,
|
||||
const TargetRegisterInfo *TRI) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
MachineInstr* LoadMI) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
|
||||
MachineBasicBlock *&FBB,
|
||||
SmallVectorImpl<MachineOperand> &Cond,
|
||||
|
@ -332,88 +332,6 @@ SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
|
||||
addFrameReference(BuildMI(MBB, MI, DL, get(opc), DestReg), FrameIdx);
|
||||
}
|
||||
|
||||
//! Return true if the specified load or store can be folded
|
||||
bool
|
||||
SPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops) const {
|
||||
if (Ops.size() != 1) return false;
|
||||
|
||||
// Make sure this is a reg-reg copy.
|
||||
unsigned Opc = MI->getOpcode();
|
||||
|
||||
switch (Opc) {
|
||||
case SPU::ORv16i8:
|
||||
case SPU::ORv8i16:
|
||||
case SPU::ORv4i32:
|
||||
case SPU::ORv2i64:
|
||||
case SPU::ORr8:
|
||||
case SPU::ORr16:
|
||||
case SPU::ORr32:
|
||||
case SPU::ORr64:
|
||||
case SPU::ORf32:
|
||||
case SPU::ORf64:
|
||||
if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg())
|
||||
return true;
|
||||
break;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/// foldMemoryOperand - SPU, like PPC, can only fold spills into
|
||||
/// copy instructions, turning them into load/store instructions.
|
||||
MachineInstr *
|
||||
SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const
|
||||
{
|
||||
if (Ops.size() != 1) return 0;
|
||||
|
||||
unsigned OpNum = Ops[0];
|
||||
unsigned Opc = MI->getOpcode();
|
||||
MachineInstr *NewMI = 0;
|
||||
|
||||
switch (Opc) {
|
||||
case SPU::ORv16i8:
|
||||
case SPU::ORv8i16:
|
||||
case SPU::ORv4i32:
|
||||
case SPU::ORv2i64:
|
||||
case SPU::ORr8:
|
||||
case SPU::ORr16:
|
||||
case SPU::ORr32:
|
||||
case SPU::ORr64:
|
||||
case SPU::ORf32:
|
||||
case SPU::ORf64:
|
||||
if (OpNum == 0) { // move -> store
|
||||
unsigned InReg = MI->getOperand(1).getReg();
|
||||
bool isKill = MI->getOperand(1).isKill();
|
||||
bool isUndef = MI->getOperand(1).isUndef();
|
||||
if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
|
||||
MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(),
|
||||
get(SPU::STQDr32));
|
||||
|
||||
MIB.addReg(InReg, getKillRegState(isKill) | getUndefRegState(isUndef));
|
||||
NewMI = addFrameReference(MIB, FrameIndex);
|
||||
}
|
||||
} else { // move -> load
|
||||
unsigned OutReg = MI->getOperand(0).getReg();
|
||||
bool isDead = MI->getOperand(0).isDead();
|
||||
bool isUndef = MI->getOperand(0).isUndef();
|
||||
MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc));
|
||||
|
||||
MIB.addReg(OutReg, RegState::Define | getDeadRegState(isDead) |
|
||||
getUndefRegState(isUndef));
|
||||
Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset())
|
||||
? SPU::STQDr32 : SPU::STQXr32;
|
||||
NewMI = addFrameReference(MIB, FrameIndex);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return NewMI;
|
||||
}
|
||||
|
||||
//! Branch analysis
|
||||
/*!
|
||||
\note This code was kiped from PPC. There may be more branch analysis for
|
||||
|
@ -23,19 +23,6 @@ namespace llvm {
|
||||
class SPUInstrInfo : public TargetInstrInfoImpl {
|
||||
SPUTargetMachine &TM;
|
||||
const SPURegisterInfo RI;
|
||||
protected:
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
MachineInstr* LoadMI) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
public:
|
||||
explicit SPUInstrInfo(SPUTargetMachine &tm);
|
||||
|
||||
@ -75,11 +62,6 @@ namespace llvm {
|
||||
const TargetRegisterClass *RC,
|
||||
const TargetRegisterInfo *TRI) const;
|
||||
|
||||
//! Return true if the specified load or store can be folded
|
||||
virtual
|
||||
bool canFoldMemoryOperand(const MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops) const;
|
||||
|
||||
//! Reverses a branch's condition, returning false on success.
|
||||
virtual
|
||||
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
|
||||
|
@ -139,44 +139,6 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
|
||||
.addImm(0).addFrameIndex(FI);
|
||||
}
|
||||
|
||||
/// foldMemoryOperandImpl - Fold an MBlaze register copy into a SW/LW against
/// frame index FI.  A copy is recognized as OR/ADD with R0 (the hard-wired
/// zero register) as the second source operand.
MachineInstr *MBlazeInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF,
                      MachineInstr* MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  if (Ops.size() != 1) return NULL;

  MachineInstr *NewMI = NULL;

  switch (MI->getOpcode()) {
  case MBlaze::OR:
  case MBlaze::ADD:
    // Only "dst = src op R0" is a plain copy.
    if ((MI->getOperand(0).isReg()) &&
        (MI->getOperand(2).isReg()) &&
        (MI->getOperand(2).getReg() == MBlaze::R0) &&
        (MI->getOperand(1).isReg())) {
      if (Ops[0] == 0) { // COPY -> STORE
        unsigned SrcReg = MI->getOperand(1).getReg();
        bool isKill = MI->getOperand(1).isKill();
        bool isUndef = MI->getOperand(1).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(MBlaze::SW))
          .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
          .addImm(0).addFrameIndex(FI);
      } else { // COPY -> LOAD
        unsigned DstReg = MI->getOperand(0).getReg();
        bool isDead = MI->getOperand(0).isDead();
        bool isUndef = MI->getOperand(0).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(MBlaze::LW))
          .addReg(DstReg, RegState::Define | getDeadRegState(isDead) |
                  getUndefRegState(isUndef))
          .addImm(0).addFrameIndex(FI);
      }
    }
    break;
  }

  return NewMI;
}
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// Branch Analysis
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
@ -216,18 +216,6 @@ public:
|
||||
const TargetRegisterClass *RC,
|
||||
const TargetRegisterInfo *TRI) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
MachineInstr* LoadMI) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Insert nop instruction when hazard condition is found
|
||||
virtual void insertNoop(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI) const;
|
||||
|
@ -261,80 +261,6 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
|
||||
llvm_unreachable("Register class not handled!");
|
||||
}
|
||||
|
||||
/// foldMemoryOperandImpl - Fold a Mips register copy into a load/store
/// against frame index FI.  Integer copies are recognized as ADDu with ZERO;
/// FP copies are FMOV_S32/FMOV_D32, with the memory opcode chosen from the
/// destination's register class (and folding skipped for 64-bit FP on Mips1,
/// which lacks ldc1/sdc1).  Returns NULL when no fold is possible.
MachineInstr *MipsInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF,
                      MachineInstr* MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const
{
  if (Ops.size() != 1) return NULL;

  MachineInstr *NewMI = NULL;

  switch (MI->getOpcode()) {
  case Mips::ADDu:
    // Only "dst = ZERO + src" is a plain copy.
    if ((MI->getOperand(0).isReg()) &&
        (MI->getOperand(1).isReg()) &&
        (MI->getOperand(1).getReg() == Mips::ZERO) &&
        (MI->getOperand(2).isReg())) {
      if (Ops[0] == 0) { // COPY -> STORE
        unsigned SrcReg = MI->getOperand(2).getReg();
        bool isKill = MI->getOperand(2).isKill();
        bool isUndef = MI->getOperand(2).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(Mips::SW))
          .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
          .addImm(0).addFrameIndex(FI);
      } else { // COPY -> LOAD
        unsigned DstReg = MI->getOperand(0).getReg();
        bool isDead = MI->getOperand(0).isDead();
        bool isUndef = MI->getOperand(0).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(Mips::LW))
          .addReg(DstReg, RegState::Define | getDeadRegState(isDead) |
                  getUndefRegState(isUndef))
          .addImm(0).addFrameIndex(FI);
      }
    }
    break;
  case Mips::FMOV_S32:
  case Mips::FMOV_D32:
    if ((MI->getOperand(0).isReg()) &&
        (MI->getOperand(1).isReg())) {
      const TargetRegisterClass
        *RC = RI.getRegClass(MI->getOperand(0).getReg());
      unsigned StoreOpc, LoadOpc;
      bool IsMips1 = TM.getSubtarget<MipsSubtarget>().isMips1();

      if (RC == Mips::FGR32RegisterClass) {
        LoadOpc = Mips::LWC1; StoreOpc = Mips::SWC1;
      } else {
        assert(RC == Mips::AFGR64RegisterClass);
        // Mips1 doesn't have ldc/sdc instructions.
        if (IsMips1) break;
        LoadOpc = Mips::LDC1; StoreOpc = Mips::SDC1;
      }

      if (Ops[0] == 0) { // COPY -> STORE
        unsigned SrcReg = MI->getOperand(1).getReg();
        bool isKill = MI->getOperand(1).isKill();
        // BUG FIX: this previously read MI->getOperand(2).isUndef(), but an
        // FMOV copy has only operands (0)=dst and (1)=src; the undef flag
        // must come from the source operand being stored.
        bool isUndef = MI->getOperand(1).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(StoreOpc))
          .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
          .addImm(0).addFrameIndex(FI) ;
      } else { // COPY -> LOAD
        unsigned DstReg = MI->getOperand(0).getReg();
        bool isDead = MI->getOperand(0).isDead();
        bool isUndef = MI->getOperand(0).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(LoadOpc))
          .addReg(DstReg, RegState::Define | getDeadRegState(isDead) |
                  getUndefRegState(isUndef))
          .addImm(0).addFrameIndex(FI);
      }
    }
    break;
  }

  return NewMI;
}
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// Branch Analysis
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
@ -222,18 +222,6 @@ public:
|
||||
const TargetRegisterClass *RC,
|
||||
const TargetRegisterInfo *TRI) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
MachineInstr* LoadMI) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
virtual
|
||||
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
|
||||
|
||||
|
@ -649,121 +649,6 @@ PPCInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
|
||||
return &*MIB;
|
||||
}
|
||||
|
||||
/// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
/// copy instructions, turning them into load/store instructions.
/// A copy is OR/OR8 with identical sources (integer) or FMR/FMRSD (FP);
/// OpNum == 0 means the copy's def is folded (copy becomes a store of its
/// source), otherwise the use is folded (copy becomes a load).
MachineInstr *PPCInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                  MachineInstr *MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                                  int FrameIndex) const {
  if (Ops.size() != 1) return NULL;

  // Make sure this is a reg-reg copy.  Note that we can't handle MCRF, because
  // it takes more than one instruction to store it.
  unsigned Opc = MI->getOpcode();
  unsigned OpNum = Ops[0];

  MachineInstr *NewMI = NULL;
  if ((Opc == PPC::OR &&
       MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
    // 32-bit GPR copy -> STW / LWZ.
    if (OpNum == 0) { // move -> store
      unsigned InReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::STW))
                                .addReg(InReg,
                                        getKillRegState(isKill) |
                                        getUndefRegState(isUndef)),
                                FrameIndex);
    } else { // move -> load
      unsigned OutReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::LWZ))
                                .addReg(OutReg,
                                        RegState::Define |
                                        getDeadRegState(isDead) |
                                        getUndefRegState(isUndef)),
                                FrameIndex);
    }
  } else if ((Opc == PPC::OR8 &&
              MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
    // 64-bit GPR copy -> STD / LD.
    if (OpNum == 0) { // move -> store
      unsigned InReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::STD))
                                .addReg(InReg,
                                        getKillRegState(isKill) |
                                        getUndefRegState(isUndef)),
                                FrameIndex);
    } else { // move -> load
      unsigned OutReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::LD))
                                .addReg(OutReg,
                                        RegState::Define |
                                        getDeadRegState(isDead) |
                                        getUndefRegState(isUndef)),
                                FrameIndex);
    }
  } else if (Opc == PPC::FMR || Opc == PPC::FMRSD) {
    // The register may be F4RC or F8RC, and that determines the memory op.
    unsigned OrigReg = MI->getOperand(OpNum).getReg();
    // We cannot tell the register class from a physreg alone.
    if (TargetRegisterInfo::isPhysicalRegister(OrigReg))
      return NULL;
    const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(OrigReg);
    const bool is64 = RC == PPC::F8RCRegisterClass;

    if (OpNum == 0) { // move -> store
      unsigned InReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(),
                                        get(is64 ? PPC::STFD : PPC::STFS))
                                .addReg(InReg,
                                        getKillRegState(isKill) |
                                        getUndefRegState(isUndef)),
                                FrameIndex);
    } else { // move -> load
      unsigned OutReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(),
                                        get(is64 ? PPC::LFD : PPC::LFS))
                                .addReg(OutReg,
                                        RegState::Define |
                                        getDeadRegState(isDead) |
                                        getUndefRegState(isUndef)),
                                FrameIndex);
    }
  }

  return NewMI;
}
|
||||
|
||||
bool PPCInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops) const {
|
||||
if (Ops.size() != 1) return false;
|
||||
|
||||
// Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
|
||||
// it takes more than one instruction to store it.
|
||||
unsigned Opc = MI->getOpcode();
|
||||
|
||||
if ((Opc == PPC::OR &&
|
||||
MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
|
||||
return true;
|
||||
else if ((Opc == PPC::OR8 &&
|
||||
MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
|
||||
return true;
|
||||
else if (Opc == PPC::FMR || Opc == PPC::FMRSD)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
bool PPCInstrInfo::
|
||||
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
|
||||
assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
|
||||
|
@ -134,23 +134,6 @@ public:
|
||||
const MDNode *MDPtr,
|
||||
DebugLoc DL) const;
|
||||
|
||||
/// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
|
||||
/// copy instructions, turning them into load/store instructions.
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
MachineInstr* LoadMI) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
virtual bool canFoldMemoryOperand(const MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops) const;
|
||||
|
||||
virtual
|
||||
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
|
||||
|
||||
|
@ -174,61 +174,6 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
|
||||
llvm_unreachable("Can't load this register from stack slot");
|
||||
}
|
||||
|
||||
/// foldMemoryOperandImpl - Fold a Sparc register copy into a store to or load
/// from frame index FI.  Integer copies are ORrr with %g0; FP copies are
/// FMOVS/FMOVD, with single/double memory opcodes selected via isFloat.
/// Returns NULL when MI is not a recognized copy.
MachineInstr *SparcInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                    MachineInstr* MI,
                                          const SmallVectorImpl<unsigned> &Ops,
                                                    int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  bool isFloat = false;
  MachineInstr *NewMI = NULL;
  switch (MI->getOpcode()) {
  case SP::ORrr:
    // "dst = or %g0, src" is the integer reg-reg copy.
    if (MI->getOperand(1).isReg() && MI->getOperand(1).getReg() == SP::G0 &&
        MI->getOperand(0).isReg() && MI->getOperand(2).isReg()) {
      if (OpNum == 0)    // COPY -> STORE
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(SP::STri))
          .addFrameIndex(FI)
          .addImm(0)
          .addReg(MI->getOperand(2).getReg());
      else               // COPY -> LOAD
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(SP::LDri),
                        MI->getOperand(0).getReg())
          .addFrameIndex(FI)
          .addImm(0);
    }
    break;
  case SP::FMOVS:
    isFloat = true;
    // FALLTHROUGH
  case SP::FMOVD:
    if (OpNum == 0) { // COPY -> STORE
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(),
                      get(isFloat ? SP::STFri : SP::STDFri))
        .addFrameIndex(FI)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef));
    } else {           // COPY -> LOAD
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(),
                      get(isFloat ? SP::LDFri : SP::LDDFri))
        .addReg(DstReg, RegState::Define |
                getDeadRegState(isDead) | getUndefRegState(isUndef))
        .addFrameIndex(FI)
        .addImm(0);
    }
    break;
  }

  return NewMI;
}
|
||||
|
||||
unsigned SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const
|
||||
{
|
||||
SparcMachineFunctionInfo *SparcFI = MF->getInfo<SparcMachineFunctionInfo>();
|
||||
|
@ -88,18 +88,6 @@ public:
|
||||
const TargetRegisterClass *RC,
|
||||
const TargetRegisterInfo *TRI) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const;
|
||||
|
||||
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
MachineInstr* LoadMI) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned getGlobalBaseReg(MachineFunction *MF) const;
|
||||
};
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user