Split foldMemoryOperand into public non-virtual and protected virtual
parts, and add target-independent code to add/preserve MachineMemOperands.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60488 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 15511cf166
commit c54baa2d43
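The change is an instance of the non-virtual interface idiom: foldMemoryOperand becomes a public, non-virtual driver that performs the target-independent MachineMemOperand bookkeeping, and each target overrides only a protected foldMemoryOperandImpl hook. A minimal compilable sketch of that shape, using simplified stand-in types rather than the real LLVM classes:

// Sketch of the non-virtual-interface split this commit applies.
// All types here are simplified stand-ins, not the real LLVM API.
#include <cstdio>

struct MachineInstr {};

class TargetInstrInfoSketch {
public:
  // Public, non-virtual driver: callers go through here, so the
  // target-independent bookkeeping (attaching a MachineMemOperand in
  // the real patch) happens exactly once, in shared code.
  MachineInstr *foldMemoryOperand(MachineInstr *MI, int FrameIndex) const {
    MachineInstr *NewMI = foldMemoryOperandImpl(MI, FrameIndex);
    if (!NewMI)
      return 0;                        // target declined to fold
    addMemOperand(NewMI, FrameIndex);  // shared, target-independent step
    return NewMI;
  }

  virtual ~TargetInstrInfoSketch() {}

protected:
  // Protected virtual hook: a target overrides only the folding itself.
  virtual MachineInstr *foldMemoryOperandImpl(MachineInstr *, int) const {
    return 0;  // default: cannot fold
  }

private:
  void addMemOperand(MachineInstr *, int FrameIndex) const {
    std::printf("memoperand attached for frame index %d\n", FrameIndex);
  }
};

// Example target: pretends every fold succeeds.
struct FooInstrInfo : TargetInstrInfoSketch {
protected:
  virtual MachineInstr *foldMemoryOperandImpl(MachineInstr *, int) const {
    return new MachineInstr();  // stand-in for the folded instruction
  }
};

int main() {
  FooInstrInfo TII;
  MachineInstr Old;
  delete TII.foldMemoryOperand(&Old, 3);  // prints the bookkeeping message
}

The payoff is that every backend gets the memoperand bookkeeping for free, and no backend can forget it.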
@@ -305,23 +305,41 @@ public:
   /// operand folded, otherwise NULL is returned. The client is responsible for
   /// removing the old instruction and adding the new one in the instruction
   /// stream.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const {
-    return 0;
-  }
+  MachineInstr* foldMemoryOperand(MachineFunction &MF,
+                                  MachineInstr* MI,
+                                  const SmallVectorImpl<unsigned> &Ops,
+                                  int FrameIndex) const;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
-    return 0;
-  }
+  MachineInstr* foldMemoryOperand(MachineFunction &MF,
+                                  MachineInstr* MI,
+                                  const SmallVectorImpl<unsigned> &Ops,
+                                  MachineInstr* LoadMI) const;
+
+protected:
+  /// foldMemoryOperandImpl - Target-dependent implementation for
+  /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
+  /// take care of adding a MachineMemOperand to the newly created instruction.
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const {
+    return 0;
+  }
+
+  /// foldMemoryOperandImpl - Target-dependent implementation for
+  /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
+  /// take care of adding a MachineMemOperand to the newly created instruction.
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
+    return 0;
+  }
+
+public:
   /// canFoldMemoryOperand - Returns true if the specified load / store is
   /// folding is possible.
   virtual
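For context, a client such as the spiller now calls the public entry point and, per the documented contract, replaces the old instruction itself. A sketch of that call pattern; TII, MF, MI, MBB, OpNum, and FrameIndex are assumed from the caller's environment, so treat this as an illustration of the contract, not code from the patch:

// Hypothetical call site, sketched against the declarations above.
SmallVector<unsigned, 2> Ops;
Ops.push_back(OpNum);  // which operand of MI to fold through the stack slot

if (MachineInstr *NewMI = TII->foldMemoryOperand(MF, MI, Ops, FrameIndex)) {
  // On success NewMI already carries a MachineMemOperand for the slot;
  // per the doc comment, the client adds the new instruction to the
  // stream and removes the old one.
  MBB->insert(MI, NewMI);
  MI->eraseFromParent();
}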
@@ -14,8 +14,10 @@
 
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
 using namespace llvm;
 
 // commuteInstruction - The default implementation of this method just exchanges
@@ -124,3 +126,69 @@ TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
   }
   return FnSize;
 }
+
+/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
+/// slot into the specified machine instruction for the specified operand(s).
+/// If this is possible, a new instruction is returned with the specified
+/// operand folded, otherwise NULL is returned. The client is responsible for
+/// removing the old instruction and adding the new one in the instruction
+/// stream.
+MachineInstr*
+TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
+                                   MachineInstr* MI,
+                                   const SmallVectorImpl<unsigned> &Ops,
+                                   int FrameIndex) const {
+  unsigned Flags = 0;
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+    if (MI->getOperand(Ops[i]).isDef())
+      Flags |= MachineMemOperand::MOStore;
+    else
+      Flags |= MachineMemOperand::MOLoad;
+
+  // Ask the target to do the actual folding.
+  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
+  if (!NewMI) return 0;
+
+  assert((!(Flags & MachineMemOperand::MOStore) ||
+          NewMI->getDesc().mayStore()) &&
+         "Folded a def to a non-store!");
+  assert((!(Flags & MachineMemOperand::MOLoad) ||
+          NewMI->getDesc().mayLoad()) &&
+         "Folded a use to a non-load!");
+  const MachineFrameInfo &MFI = *MF.getFrameInfo();
+  assert(MFI.getObjectOffset(FrameIndex) != -1);
+  MachineMemOperand MMO(PseudoSourceValue::getFixedStack(FrameIndex),
+                        Flags,
+                        MFI.getObjectOffset(FrameIndex),
+                        MFI.getObjectSize(FrameIndex),
+                        MFI.getObjectAlignment(FrameIndex));
+  NewMI->addMemOperand(MF, MMO);
+
+  return NewMI;
+}
+
+/// foldMemoryOperand - Same as the previous version except it allows folding
+/// of any load and store from / to any address, not just from a specific
+/// stack slot.
+MachineInstr*
+TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
+                                   MachineInstr* MI,
+                                   const SmallVectorImpl<unsigned> &Ops,
+                                   MachineInstr* LoadMI) const {
+  assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
+#ifndef NDEBUG
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
+#endif
+
+  // Ask the target to do the actual folding.
+  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
+  if (!NewMI) return 0;
+
+  // Copy the memoperands from the load to the folded instruction.
+  for (std::list<MachineMemOperand>::iterator I = LoadMI->memoperands_begin(),
+       E = LoadMI->memoperands_end(); I != E; ++I)
+    NewMI->addMemOperand(MF, *I);
+
+  return NewMI;
+}
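Everything after this point is the mechanical half of the patch: each backend (ARM, Alpha, CellSPU, Mips, PowerPC, Sparc, X86) renames its override from foldMemoryOperand to foldMemoryOperandImpl and leaves the memoperand handling to the driver above. As a condensed illustration of what one such override does, here is a hypothetical target Foo; the opcodes and the isRegRegCopy helper are invented for the sketch:

MachineInstr *FooInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                  MachineInstr *MI,
                                                  const SmallVectorImpl<unsigned> &Ops,
                                                  int FrameIndex) const {
  // Mirror the restriction the real backends apply: one operand at a time.
  if (Ops.size() != 1) return NULL;

  // Like PPC and SPU, only fold a spill into a register-register copy,
  // turning the copy into a plain load or store of the stack slot.
  if (!isRegRegCopy(MI)) return NULL;                  // hypothetical helper

  bool FoldingDef = (Ops[0] == 0);                     // copy dest -> store
  unsigned Reg = MI->getOperand(FoldingDef ? 1 : 0).getReg();
  unsigned Opc = FoldingDef ? Foo::STORE : Foo::LOAD;  // invented opcodes

  // Note: no addMemOperand here. TargetInstrInfo::foldMemoryOperand
  // attaches the MachineMemOperand after this hook returns.
  return BuildMI(MF, get(Opc)).addReg(Reg).addFrameIndex(FrameIndex);
}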
@@ -663,10 +663,10 @@ bool ARMInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
   return true;
 }
 
-MachineInstr *ARMInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                              MachineInstr *MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              int FI) const {
+MachineInstr *ARMInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
+                                                  const SmallVectorImpl<unsigned> &Ops,
+                                                  int FI) const {
   if (Ops.size() != 1) return NULL;
 
   unsigned OpNum = Ops[0];
@@ -211,15 +211,15 @@ public:
                                          MachineBasicBlock::iterator MI,
                                  const std::vector<CalleeSavedInfo> &CSI) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
     return 0;
   }
 
@@ -255,10 +255,10 @@ void AlphaInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
   NewMIs.push_back(MIB);
 }
 
-MachineInstr *AlphaInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                                MachineInstr *MI,
-                                                const SmallVectorImpl<unsigned> &Ops,
-                                                int FrameIndex) const {
+MachineInstr *AlphaInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                    MachineInstr *MI,
+                                                    const SmallVectorImpl<unsigned> &Ops,
+                                                    int FrameIndex) const {
   if (Ops.size() != 1) return NULL;
 
   // Make sure this is a reg-reg copy.
@@ -69,15 +69,15 @@ public:
                                      const TargetRegisterClass *RC,
                                      SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
     return 0;
   }
 
@@ -399,10 +399,10 @@ void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
 /// foldMemoryOperand - SPU, like PPC, can only fold spills into
 /// copy instructions, turning them into load/store instructions.
 MachineInstr *
-SPUInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                MachineInstr *MI,
-                                const SmallVectorImpl<unsigned> &Ops,
-                                int FrameIndex) const
+SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                    MachineInstr *MI,
+                                    const SmallVectorImpl<unsigned> &Ops,
+                                    int FrameIndex) const
 {
 #if SOMEDAY_SCOTT_LOOKS_AT_ME_AGAIN
   if (Ops.size() != 1) return NULL;
@@ -79,16 +79,16 @@ namespace llvm {
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
     //! Fold spills into load/store instructions
-    virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                            MachineInstr* MI,
-                                            const SmallVectorImpl<unsigned> &Ops,
-                                            int FrameIndex) const;
+    virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                                MachineInstr* MI,
+                                                const SmallVectorImpl<unsigned> &Ops,
+                                                int FrameIndex) const;
 
     //! Fold any load/store to an operand
-    virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                            MachineInstr* MI,
-                                            const SmallVectorImpl<unsigned> &Ops,
-                                            MachineInstr* LoadMI) const {
+    virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                                MachineInstr* MI,
+                                                const SmallVectorImpl<unsigned> &Ops,
+                                                MachineInstr* LoadMI) const {
       return 0;
     }
   };
@@ -279,9 +279,9 @@ void MipsInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
 }
 
 MachineInstr *MipsInstrInfo::
-foldMemoryOperand(MachineFunction &MF,
-                  MachineInstr* MI,
-                  const SmallVectorImpl<unsigned> &Ops, int FI) const
+foldMemoryOperandImpl(MachineFunction &MF,
+                      MachineInstr* MI,
+                      const SmallVectorImpl<unsigned> &Ops, int FI) const
 {
   if (Ops.size() != 1) return NULL;
 
@@ -323,7 +323,7 @@ foldMemoryOperand(MachineFunction &MF,
   } else if (RC == Mips::AFGR64RegisterClass) {
     LoadOpc = Mips::LDC1; StoreOpc = Mips::SDC1;
   } else
-    assert(0 && "foldMemoryOperand register unknown");
+    assert(0 && "foldMemoryOperandImpl register unknown");
 
   if (Ops[0] == 0) {    // COPY -> STORE
     unsigned SrcReg = MI->getOperand(1).getReg();
@@ -196,15 +196,15 @@ public:
                                      const TargetRegisterClass *RC,
                                      SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
     return 0;
   }
 
@@ -655,10 +655,10 @@ void PPCInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
 
 /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
 /// copy instructions, turning them into load/store instructions.
-MachineInstr *PPCInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                              MachineInstr *MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              int FrameIndex) const {
+MachineInstr *PPCInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
+                                                  const SmallVectorImpl<unsigned> &Ops,
+                                                  int FrameIndex) const {
   if (Ops.size() != 1) return NULL;
 
   // Make sure this is a reg-reg copy.  Note that we can't handle MCRF, because
@@ -142,15 +142,15 @@ public:
 
   /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
   /// copy instructions, turning them into load/store instructions.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
     return 0;
   }
 
@@ -225,10 +225,10 @@ void SparcInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
   return;
 }
 
-MachineInstr *SparcInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                                MachineInstr* MI,
-                                                const SmallVectorImpl<unsigned> &Ops,
-                                                int FI) const {
+MachineInstr *SparcInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                    MachineInstr* MI,
+                                                    const SmallVectorImpl<unsigned> &Ops,
+                                                    int FI) const {
   if (Ops.size() != 1) return NULL;
 
   unsigned OpNum = Ops[0];
@@ -96,15 +96,15 @@ public:
                                      const TargetRegisterClass *RC,
                                      SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
     return 0;
   }
 };
@@ -1976,9 +1976,9 @@ static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
 }
 
 MachineInstr*
-X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                MachineInstr *MI, unsigned i,
-                                const SmallVector<MachineOperand,4> &MOs) const{
+X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                    MachineInstr *MI, unsigned i,
+                                    const SmallVector<MachineOperand,4> &MOs) const{
   const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
   bool isTwoAddrFold = false;
   unsigned NumOps = MI->getDesc().getNumOperands();
@@ -2035,10 +2035,10 @@ X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
 }
 
 
-MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                              MachineInstr *MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              int FrameIndex) const {
+MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
+                                                  const SmallVectorImpl<unsigned> &Ops,
+                                                  int FrameIndex) const {
   // Check switch flag
   if (NoFusing) return NULL;
 
@@ -2079,13 +2079,13 @@ MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
 
   SmallVector<MachineOperand,4> MOs;
   MOs.push_back(MachineOperand::CreateFI(FrameIndex));
-  return foldMemoryOperand(MF, MI, Ops[0], MOs);
+  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs);
 }
 
-MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                              MachineInstr *MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              MachineInstr *LoadMI) const {
+MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
+                                                  const SmallVectorImpl<unsigned> &Ops,
+                                                  MachineInstr *LoadMI) const {
   // Check switch flag
   if (NoFusing) return NULL;
 
@@ -2158,7 +2158,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
     for (unsigned i = NumOps - 4; i != NumOps; ++i)
       MOs.push_back(LoadMI->getOperand(i));
   }
-  return foldMemoryOperand(MF, MI, Ops[0], MOs);
+  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs);
 }
 
 
@@ -368,18 +368,18 @@ public:
   /// folding and return true, otherwise it should return false. If it folds
   /// the instruction, it is likely that the MachineInstruction the iterator
   /// references has been changed.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const;
 
   /// canFoldMemoryOperand - Returns true if the specified load / store is
   /// folding is possible.
@@ -444,10 +444,10 @@ public:
   unsigned getGlobalBaseReg(MachineFunction *MF) const;
 
 private:
-  MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                  MachineInstr* MI,
-                                  unsigned OpNum,
-                                  const SmallVector<MachineOperand,4> &MOs) const;
+  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                      MachineInstr* MI,
+                                      unsigned OpNum,
+                                      const SmallVector<MachineOperand,4> &MOs) const;
 };
 
 } // End llvm namespace