Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-12-13)
ArrayRefize memory operand folding. NFC.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@230846 91177308-0d34-0410-b5e6-96231b3b80d8
parent 31fbd9f7b0
commit b22e2f9f2a
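The change is mechanical: every memory-operand-folding entry point that took a `const SmallVectorImpl<unsigned> &` (or `const SmallVectorImpl<MachineOperand> &`) now takes an `ArrayRef<unsigned>` (or `ArrayRef<MachineOperand>`), a cheap non-owning view passed by value, and "NFC" marks it as a no-functional-change cleanup. A minimal sketch of why ArrayRef is the more flexible parameter type, assuming only LLVM's ADT headers (`sumOps` and `demo` are illustrative names, not part of the commit):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <vector>

// The callee only reads the sequence, so it takes a cheap by-value view.
static unsigned sumOps(llvm::ArrayRef<unsigned> Ops) {
  unsigned Total = 0;
  for (unsigned Op : Ops) // ArrayRef supports range-based for.
    Total += Op;
  return Total;
}

void demo() {
  llvm::SmallVector<unsigned, 8> SV = {1, 2, 3};
  std::vector<unsigned> V = {4, 5};
  unsigned One = 6;
  (void)sumOps(SV);        // from a SmallVector
  (void)sumOps(V);         // from a std::vector
  (void)sumOps(One);       // from a single element
  (void)sumOps({7, 8, 9}); // from an initializer list
}

Because ArrayRef converts implicitly from a single element, an initializer list, or any contiguous container, the callers changed below can drop their temporary SmallVectors.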
@@ -672,16 +672,15 @@ public:
   /// operand folded, otherwise NULL is returned.
   /// The new instruction is inserted before MI, and the client is responsible
   /// for removing the old instruction.
-  MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                  const SmallVectorImpl<unsigned> &Ops,
-                                  int FrameIndex) const;
+  MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
+                                  ArrayRef<unsigned> Ops, int FrameIndex) const;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                  const SmallVectorImpl<unsigned> &Ops,
-                                  MachineInstr* LoadMI) const;
+  MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
+                                  ArrayRef<unsigned> Ops,
+                                  MachineInstr *LoadMI) const;
 
   /// hasPattern - return true when there is potentially a faster code sequence
   /// for an instruction chain ending in \p Root. All potential pattern are

@@ -723,20 +722,20 @@ protected:
   /// foldMemoryOperandImpl - Target-dependent implementation for
   /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
   /// take care of adding a MachineMemOperand to the newly created instruction.
-  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                              MachineInstr* MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              int FrameIndex) const {
+  virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr *MI,
+                                              ArrayRef<unsigned> Ops,
+                                              int FrameIndex) const {
     return nullptr;
   }
 
   /// foldMemoryOperandImpl - Target-dependent implementation for
   /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
   /// take care of adding a MachineMemOperand to the newly created instruction.
-  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                              MachineInstr* MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              MachineInstr* LoadMI) const {
+  virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr *MI,
+                                              ArrayRef<unsigned> Ops,
+                                              MachineInstr *LoadMI) const {
     return nullptr;
   }
 
@@ -786,9 +785,8 @@ protected:
 public:
   /// canFoldMemoryOperand - Returns true for the specified load / store if
   /// folding is possible.
-  virtual
-  bool canFoldMemoryOperand(const MachineInstr *MI,
-                            const SmallVectorImpl<unsigned> &Ops) const;
+  virtual bool canFoldMemoryOperand(const MachineInstr *MI,
+                                    ArrayRef<unsigned> Ops) const;
 
   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instruction. If this is
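For callers of these public entry points, the operand list no longer has to be a SmallVector. A hypothetical caller sketched under the new signatures (`foldOne`, `FoldIdx`, and `FI` are assumed names, not code from this commit):

// With an ArrayRef parameter, a single operand index converts implicitly,
// and a brace list works too; no temporary SmallVector is required.
MachineInstr *foldOne(const TargetInstrInfo &TII,
                      MachineBasicBlock::iterator MI, unsigned FoldIdx,
                      int FI) {
  return TII.foldMemoryOperand(MI, FoldIdx, FI); // one element
  // or: TII.foldMemoryOperand(MI, {FoldIdx0, FoldIdx1}, FI);
}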
@@ -377,16 +377,13 @@ void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
   llvm_unreachable("Not a MachO target");
 }
 
-bool TargetInstrInfo::
-canFoldMemoryOperand(const MachineInstr *MI,
-                     const SmallVectorImpl<unsigned> &Ops) const {
+bool TargetInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+                                           ArrayRef<unsigned> Ops) const {
   return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
 }
 
-static MachineInstr* foldPatchpoint(MachineFunction &MF,
-                                    MachineInstr *MI,
-                                    const SmallVectorImpl<unsigned> &Ops,
-                                    int FrameIndex,
+static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
+                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                     const TargetInstrInfo &TII) {
   unsigned StartIdx = 0;
   switch (MI->getOpcode()) {

@@ -405,9 +402,8 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
 
   // Return false if any operands requested for folding are not foldable (not
   // part of the stackmap's live values).
-  for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
-       I != E; ++I) {
-    if (*I < StartIdx)
+  for (unsigned Op : Ops) {
+    if (Op < StartIdx)
       return nullptr;
   }
 

@@ -448,10 +444,9 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
 /// operand folded, otherwise NULL is returned. The client is responsible for
 /// removing the old instruction and adding the new one in the instruction
 /// stream.
-MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                   const SmallVectorImpl<unsigned> &Ops,
-                                   int FI) const {
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
+                                                 ArrayRef<unsigned> Ops,
+                                                 int FI) const {
   unsigned Flags = 0;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     if (MI->getOperand(Ops[i]).isDef())

@@ -517,10 +512,9 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
 /// foldMemoryOperand - Same as the previous version except it allows folding
 /// of any load and store from / to any address, not just from a specific
 /// stack slot.
-MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                   const SmallVectorImpl<unsigned> &Ops,
-                                   MachineInstr* LoadMI) const {
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
+                                                 ArrayRef<unsigned> Ops,
+                                                 MachineInstr *LoadMI) const {
   assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
 #ifndef NDEBUG
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
@@ -2068,10 +2068,10 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
       .setMIFlag(Flag);
 }
 
-MachineInstr *
-AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                        const SmallVectorImpl<unsigned> &Ops,
-                                        int FrameIndex) const {
+MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                      MachineInstr *MI,
+                                                      ArrayRef<unsigned> Ops,
+                                                      int FrameIndex) const {
   // This is a bit of a hack. Consider this instruction:
   //
   //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
@@ -129,10 +129,9 @@ public:
                              const TargetRegisterInfo *TRI) const override;
 
   using TargetInstrInfo::foldMemoryOperandImpl;
-  MachineInstr *
-  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                        const SmallVectorImpl<unsigned> &Ops,
-                        int FrameIndex) const override;
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
+                                      int FrameIndex) const override;
 
   bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                      MachineBasicBlock *&FBB,
@@ -550,11 +550,10 @@ void HexagonInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
   llvm_unreachable("Unimplemented");
 }
 
-
 MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                                      MachineInstr* MI,
-                                                      const SmallVectorImpl<unsigned> &Ops,
-                                                      int FI) const {
+                                                      MachineInstr *MI,
+                                                      ArrayRef<unsigned> Ops,
+                                                      int FI) const {
   // Hexagon_TODO: Implement.
   return nullptr;
 }
@@ -102,15 +102,13 @@ public:
                         const TargetRegisterClass *RC,
                         SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
-  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr* MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
                                       int FrameIndex) const override;
 
-  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr* MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
-                                      MachineInstr* LoadMI) const override {
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
+                                      MachineInstr *LoadMI) const override {
     return nullptr;
   }
 
@@ -152,26 +152,22 @@ bool AMDGPUInstrInfo::expandPostRAPseudo (MachineBasicBlock::iterator MI) const
   return true;
 }
 
-
-MachineInstr *
-AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                       MachineInstr *MI,
-                                       const SmallVectorImpl<unsigned> &Ops,
-                                       int FrameIndex) const {
+MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                     MachineInstr *MI,
+                                                     ArrayRef<unsigned> Ops,
+                                                     int FrameIndex) const {
   // TODO: Implement this function
   return nullptr;
 }
-MachineInstr*
-AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                       MachineInstr *MI,
-                                       const SmallVectorImpl<unsigned> &Ops,
-                                       MachineInstr *LoadMI) const {
+MachineInstr *
+AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                       ArrayRef<unsigned> Ops,
+                                       MachineInstr *LoadMI) const {
   // TODO: Implement this function
   return nullptr;
 }
-bool
-AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
-                                      const SmallVectorImpl<unsigned> &Ops) const {
+bool AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+                                           ArrayRef<unsigned> Ops) const {
   // TODO: Implement this function
   return false;
 }
@@ -85,14 +85,13 @@ public:
                             const TargetRegisterInfo *TRI) const override;
 
 protected:
-  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr *MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
                                       int FrameIndex) const override;
-  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr *MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
                                       MachineInstr *LoadMI) const override;
 
 public:
   /// \returns the smallest register index that will be accessed by an indirect
   /// read or write or -1 if indirect addressing is not used by this program.

@@ -103,7 +102,7 @@ public:
   int getIndirectIndexEnd(const MachineFunction &MF) const;
 
   bool canFoldMemoryOperand(const MachineInstr *MI,
-                            const SmallVectorImpl<unsigned> &Ops) const override;
+                            ArrayRef<unsigned> Ops) const override;
   bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                            unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                            SmallVectorImpl<MachineInstr *> &NewMIs) const override;
@@ -743,11 +743,10 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
   return nullptr;
 }
 
-MachineInstr *
-SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                        MachineInstr *MI,
-                                        const SmallVectorImpl<unsigned> &Ops,
-                                        int FrameIndex) const {
+MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                      MachineInstr *MI,
+                                                      ArrayRef<unsigned> Ops,
+                                                      int FrameIndex) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
   unsigned Size = MFI->getObjectSize(FrameIndex);
   unsigned Opcode = MI->getOpcode();

@@ -862,9 +861,9 @@ SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
 }
 
 MachineInstr *
-SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
-                                        const SmallVectorImpl<unsigned> &Ops,
-                                        MachineInstr* LoadMI) const {
+SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                        ArrayRef<unsigned> Ops,
+                                        MachineInstr *LoadMI) const {
   return nullptr;
 }
 
@@ -186,11 +186,11 @@ public:
                               MachineBasicBlock::iterator &MBBI,
                               LiveVariables *LV) const override;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
+                                      ArrayRef<unsigned> Ops,
                                       int FrameIndex) const override;
-  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
-                                      MachineInstr* LoadMI) const override;
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
+                                      MachineInstr *LoadMI) const override;
   bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
   bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
     override;
@@ -4573,9 +4573,7 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr *MI,
     return nullptr;
 
   // Check whether we can fold the def into SrcOperandId.
-  SmallVector<unsigned, 8> Ops;
-  Ops.push_back(SrcOperandId);
-  MachineInstr *FoldMI = foldMemoryOperand(MI, Ops, DefMI);
+  MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandId, DefMI);
   if (FoldMI) {
     FoldAsLoadDefReg = 0;
     return FoldMI;
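This hunk is the caller-side payoff: the three lines that materialized a one-element SmallVector collapse into a direct call, because ArrayRef has an implicit single-element constructor. The same pattern in isolation (a sketch; `fn` stands in for any ArrayRef-taking callee such as foldMemoryOperand):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

static unsigned fn(llvm::ArrayRef<unsigned> Ops) { return Ops.size(); }

unsigned before(unsigned SrcOperandId) {
  llvm::SmallVector<unsigned, 8> Ops; // old: build a vector for one element
  Ops.push_back(SrcOperandId);
  return fn(Ops);
}

unsigned after(unsigned SrcOperandId) {
  return fn(SrcOperandId); // new: ArrayRef(const T &) views the single value
}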
@@ -4670,7 +4668,7 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
 }
 
 static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
-                                     const SmallVectorImpl<MachineOperand> &MOs,
+                                     ArrayRef<MachineOperand> MOs,
                                      MachineInstr *MI,
                                      const TargetInstrInfo &TII) {
   // Create the base instruction with the memory operand as the first part.

@@ -4697,9 +4695,8 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
   return MIB;
 }
 
-static MachineInstr *FuseInst(MachineFunction &MF,
-                              unsigned Opcode, unsigned OpNo,
-                              const SmallVectorImpl<MachineOperand> &MOs,
+static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
+                              unsigned OpNo, ArrayRef<MachineOperand> MOs,
                               MachineInstr *MI, const TargetInstrInfo &TII) {
   // Omit the implicit operands, something BuildMI can't do.
   MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),

@@ -4723,7 +4720,7 @@ static MachineInstr *FuseInst(MachineFunction &MF,
 }
 
 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
-                                const SmallVectorImpl<MachineOperand> &MOs,
+                                ArrayRef<MachineOperand> MOs,
                                 MachineInstr *MI) {
   MachineFunction &MF = *MI->getParent()->getParent();
   MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));

@@ -4736,12 +4733,12 @@ static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
   return MIB.addImm(0);
 }
 
-MachineInstr*
-X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
-                                    MachineInstr *MI, unsigned OpNum,
-                                    const SmallVectorImpl<MachineOperand> &MOs,
-                                    unsigned Size, unsigned Align,
-                                    bool AllowCommute) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
+                                                  unsigned OpNum,
+                                                  ArrayRef<MachineOperand> MOs,
+                                                  unsigned Size, unsigned Align,
+                                                  bool AllowCommute) const {
   const DenseMap<unsigned,
                  std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
   bool isCallRegIndirect = Subtarget.callRegIndirect();

@@ -5104,10 +5101,10 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
   MI->addRegisterKilled(Reg, TRI, true);
 }
 
-MachineInstr*
-X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                    const SmallVectorImpl<unsigned> &Ops,
-                                    int FrameIndex) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
+                                                  ArrayRef<unsigned> Ops,
+                                                  int FrameIndex) const {
   // Check switch flag
   if (NoFusing) return nullptr;
 

@@ -5145,10 +5142,9 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
   } else if (Ops.size() != 1)
     return nullptr;
 
-  SmallVector<MachineOperand,4> MOs;
-  MOs.push_back(MachineOperand::CreateFI(FrameIndex));
-  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs,
-                               Size, Alignment, /*AllowCommute=*/true);
+  return foldMemoryOperandImpl(MF, MI, Ops[0],
+                               MachineOperand::CreateFI(FrameIndex), Size,
+                               Alignment, /*AllowCommute=*/true);
 }
 
 static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
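The same single-element conversion is what lets `MachineOperand::CreateFI(FrameIndex)` be passed directly here: the temporary MachineOperand lives until the end of the full expression, which covers the whole nested call. The one caveat with ArrayRef is that the view must never outlive what it points at; a sketch of the rule (illustrative names, not commit code):

#include "llvm/ADT/ArrayRef.h"

static int first(llvm::ArrayRef<int> A) { return A.front(); }

int demo() {
  // Safe: the temporary `7` outlives the call it is passed to.
  return first(7);
  // Unsafe by contrast: a local `llvm::ArrayRef<int> V(7);` would leave V
  // pointing at a temporary already destroyed at the end of that statement.
}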
@@ -5170,9 +5166,9 @@ static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
   return false;
 }
 
-MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                   MachineInstr *MI,
-                                                  const SmallVectorImpl<unsigned> &Ops,
+                                                  ArrayRef<unsigned> Ops,
                                                   MachineInstr *LoadMI) const {
   // If loading from a FrameIndex, fold directly from the FrameIndex.
   unsigned NumOps = LoadMI->getDesc().getNumOperands();

@@ -5304,9 +5300,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                /*Size=*/0, Alignment, /*AllowCommute=*/true);
 }
 
-
 bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
-                                        const SmallVectorImpl<unsigned> &Ops) const {
+                                        ArrayRef<unsigned> Ops) const {
   // Check switch flag
   if (NoFusing) return 0;
 
@@ -305,23 +305,21 @@ public:
   /// folding and return true, otherwise it should return false. If it folds
   /// the instruction, it is likely that the MachineInstruction the iterator
   /// references has been changed.
-  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr* MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
                                       int FrameIndex) const override;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr* MI,
-                                      const SmallVectorImpl<unsigned> &Ops,
-                                      MachineInstr* LoadMI) const override;
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+                                      ArrayRef<unsigned> Ops,
+                                      MachineInstr *LoadMI) const override;
 
   /// canFoldMemoryOperand - Returns true if the specified load / store is
   /// folding is possible.
-  bool canFoldMemoryOperand(const MachineInstr*,
-                            const SmallVectorImpl<unsigned> &) const override;
+  bool canFoldMemoryOperand(const MachineInstr *,
+                            ArrayRef<unsigned>) const override;
 
   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instruction. If this is

@@ -406,10 +404,9 @@ public:
   void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
                                  const TargetRegisterInfo *TRI) const override;
 
-  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                      MachineInstr* MI,
+  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       unsigned OpNum,
-                                      const SmallVectorImpl<MachineOperand> &MOs,
+                                      ArrayRef<MachineOperand> MOs,
                                       unsigned Size, unsigned Alignment,
                                       bool AllowCommute) const;
 