Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-15 04:30:12 +00:00
Split foldMemoryOperand into public non-virtual and protected virtual
parts, and add target-independent code to add/preserve MachineMemOperands.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60488 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 15511cf166
commit c54baa2d43
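The split follows the non-virtual interface (template method) idiom: clients call a public non-virtual entry point that performs the target-independent bookkeeping, while each target overrides only a protected virtual hook. Below is a minimal standalone C++ sketch of that shape; it is illustrative only, and every name in it is invented rather than taken from LLVM.

#include <cstdio>

// Standalone sketch of the public non-virtual / protected virtual split;
// all names here are invented for the example.
class InstrInfoBase {
public:
  // Public and non-virtual: all callers funnel through here, so the
  // target-independent step below can never be skipped by a subclass.
  int fold(int Operand) const {
    int Folded = foldImpl(Operand);   // target-specific part
    if (Folded == 0)
      return 0;                       // target declined to fold
    recordMemOperand();               // target-independent bookkeeping
    return Folded;
  }

  virtual ~InstrInfoBase() {}

protected:
  // Protected and virtual: targets override this; the default mirrors
  // the "return 0;" (cannot fold) default added by the patch.
  virtual int foldImpl(int) const { return 0; }

private:
  void recordMemOperand() const { std::printf("memoperand attached\n"); }
};

class SomeTarget : public InstrInfoBase {
protected:
  virtual int foldImpl(int Operand) const { return Operand * 2; }
};

int main() {
  SomeTarget TII;
  return TII.fold(21) == 42 ? 0 : 1;  // prints "memoperand attached"
}

The payoff, as the diff below shows, is that the MachineMemOperand logic lives in exactly one place instead of being repeated, or forgotten, in every backend.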
@@ -305,23 +305,41 @@ public:
   /// operand folded, otherwise NULL is returned. The client is responsible for
   /// removing the old instruction and adding the new one in the instruction
   /// stream.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const {
-    return 0;
-  }
+  MachineInstr* foldMemoryOperand(MachineFunction &MF,
+                                  MachineInstr* MI,
+                                  const SmallVectorImpl<unsigned> &Ops,
+                                  int FrameIndex) const;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
-    return 0;
-  }
+  MachineInstr* foldMemoryOperand(MachineFunction &MF,
+                                  MachineInstr* MI,
+                                  const SmallVectorImpl<unsigned> &Ops,
+                                  MachineInstr* LoadMI) const;
+
+protected:
+  /// foldMemoryOperandImpl - Target-dependent implementation for
+  /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
+  /// take care of adding a MachineMemOperand to the newly created instruction.
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const {
+    return 0;
+  }
+
+  /// foldMemoryOperandImpl - Target-dependent implementation for
+  /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
+  /// take care of adding a MachineMemOperand to the newly created instruction.
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
+    return 0;
+  }
+
+public:
   /// canFoldMemoryOperand - Returns true for the specified load / store if
   /// folding is possible.
   virtual
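With the header split in place, a backend participates in folding by overriding only the protected hooks; the public foldMemoryOperand wrapper is inherited unchanged. A hypothetical target header might now declare the following (the class name, base class, and access placement are assumptions for illustration, not part of this commit):

// Hypothetical backend declaration after this change: only the protected
// Impl hooks are overridden; foldMemoryOperand itself is inherited.
class MyTargetInstrInfo : public TargetInstrInfoImpl {
protected:
  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                              MachineInstr* MI,
                                              const SmallVectorImpl<unsigned> &Ops,
                                              int FrameIndex) const;

  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                              MachineInstr* MI,
                                              const SmallVectorImpl<unsigned> &Ops,
                                              MachineInstr* LoadMI) const;
};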
@@ -14,8 +14,10 @@
 
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
 using namespace llvm;
 
 // commuteInstruction - The default implementation of this method just exchanges
@@ -124,3 +126,69 @@ TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
   }
   return FnSize;
 }
+
+/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
+/// slot into the specified machine instruction for the specified operand(s).
+/// If this is possible, a new instruction is returned with the specified
+/// operand folded, otherwise NULL is returned. The client is responsible for
+/// removing the old instruction and adding the new one in the instruction
+/// stream.
+MachineInstr*
+TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
+                                   MachineInstr* MI,
+                                   const SmallVectorImpl<unsigned> &Ops,
+                                   int FrameIndex) const {
+  unsigned Flags = 0;
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+    if (MI->getOperand(Ops[i]).isDef())
+      Flags |= MachineMemOperand::MOStore;
+    else
+      Flags |= MachineMemOperand::MOLoad;
+
+  // Ask the target to do the actual folding.
+  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
+  if (!NewMI) return 0;
+
+  assert((!(Flags & MachineMemOperand::MOStore) ||
+          NewMI->getDesc().mayStore()) &&
+         "Folded a def to a non-store!");
+  assert((!(Flags & MachineMemOperand::MOLoad) ||
+          NewMI->getDesc().mayLoad()) &&
+         "Folded a use to a non-load!");
+  const MachineFrameInfo &MFI = *MF.getFrameInfo();
+  assert(MFI.getObjectOffset(FrameIndex) != -1);
+  MachineMemOperand MMO(PseudoSourceValue::getFixedStack(FrameIndex),
+                        Flags,
+                        MFI.getObjectOffset(FrameIndex),
+                        MFI.getObjectSize(FrameIndex),
+                        MFI.getObjectAlignment(FrameIndex));
+  NewMI->addMemOperand(MF, MMO);
+
+  return NewMI;
+}
+
+/// foldMemoryOperand - Same as the previous version except it allows folding
+/// of any load and store from / to any address, not just from a specific
+/// stack slot.
+MachineInstr*
+TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
+                                   MachineInstr* MI,
+                                   const SmallVectorImpl<unsigned> &Ops,
+                                   MachineInstr* LoadMI) const {
+  assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
+#ifndef NDEBUG
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
+#endif
+
+  // Ask the target to do the actual folding.
+  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
+  if (!NewMI) return 0;
+
+  // Copy the memoperands from the load to the folded instruction.
+  for (std::list<MachineMemOperand>::iterator I = LoadMI->memoperands_begin(),
+       E = LoadMI->memoperands_end(); I != E; ++I)
+    NewMI->addMemOperand(MF, *I);
+
+  return NewMI;
+}
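Because the wrapper now attaches the MachineMemOperand itself for stack-slot folds and copies memoperands over for load folds, callers get correct memory information with no bookkeeping of their own. The following is a hedged sketch of what a call site could look like; the helper name, variables, and surrounding context are invented for illustration and are not part of this commit:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// Hypothetical call site: try to fold a reload from StackSlot into the
// use at operand index UseOpIdx of MI.  All names are illustrative.
static bool tryFoldReload(const TargetInstrInfo *TII, MachineFunction &MF,
                          MachineInstr *MI, unsigned UseOpIdx, int StackSlot) {
  SmallVector<unsigned, 1> Ops;
  Ops.push_back(UseOpIdx);
  MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, MI, Ops, StackSlot);
  if (!FoldedMI)
    return false;  // Target couldn't fold; keep the explicit reload.
  // foldMemoryOperand has already attached a MachineMemOperand for the
  // stack slot, so later passes still see the memory reference.
  MachineBasicBlock &MBB = *MI->getParent();
  MBB.insert(MachineBasicBlock::iterator(MI), FoldedMI);
  MBB.erase(MachineBasicBlock::iterator(MI));
  return true;
}

The remaining hunks are the mechanical side of the same change: each in-tree target's override is renamed from foldMemoryOperand to foldMemoryOperandImpl.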
@@ -663,7 +663,7 @@ bool ARMInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
   return true;
 }
 
-MachineInstr *ARMInstrInfo::foldMemoryOperand(MachineFunction &MF,
+MachineInstr *ARMInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                               MachineInstr *MI,
                                               const SmallVectorImpl<unsigned> &Ops,
                                               int FI) const {
@@ -211,12 +211,12 @@ public:
                                            MachineBasicBlock::iterator MI,
                                            const std::vector<CalleeSavedInfo> &CSI) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
@@ -255,7 +255,7 @@ void AlphaInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
     NewMIs.push_back(MIB);
 }
 
-MachineInstr *AlphaInstrInfo::foldMemoryOperand(MachineFunction &MF,
+MachineInstr *AlphaInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                 MachineInstr *MI,
                                                 const SmallVectorImpl<unsigned> &Ops,
                                                 int FrameIndex) const {
@@ -69,12 +69,12 @@ public:
                                const TargetRegisterClass *RC,
                                SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
@@ -399,7 +399,7 @@ void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
 /// foldMemoryOperand - SPU, like PPC, can only fold spills into
 /// copy instructions, turning them into load/store instructions.
 MachineInstr *
-SPUInstrInfo::foldMemoryOperand(MachineFunction &MF,
+SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                 MachineInstr *MI,
                                 const SmallVectorImpl<unsigned> &Ops,
                                 int FrameIndex) const
@@ -79,13 +79,13 @@ namespace llvm {
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
     //! Fold spills into load/store instructions
-    virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+    virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                             MachineInstr* MI,
                                             const SmallVectorImpl<unsigned> &Ops,
                                             int FrameIndex) const;
 
     //! Fold any load/store to an operand
-    virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+    virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                             MachineInstr* MI,
                                             const SmallVectorImpl<unsigned> &Ops,
                                             MachineInstr* LoadMI) const {
@@ -279,7 +279,7 @@ void MipsInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
 }
 
 MachineInstr *MipsInstrInfo::
-foldMemoryOperand(MachineFunction &MF,
+foldMemoryOperandImpl(MachineFunction &MF,
                   MachineInstr* MI,
                   const SmallVectorImpl<unsigned> &Ops, int FI) const
 {
@@ -323,7 +323,7 @@ foldMemoryOperand(MachineFunction &MF,
     } else if (RC == Mips::AFGR64RegisterClass) {
       LoadOpc = Mips::LDC1; StoreOpc = Mips::SDC1;
     } else
-      assert(0 && "foldMemoryOperand register unknown");
+      assert(0 && "foldMemoryOperandImpl register unknown");
 
     if (Ops[0] == 0) {    // COPY -> STORE
       unsigned SrcReg = MI->getOperand(1).getReg();
@@ -196,12 +196,12 @@ public:
                                const TargetRegisterClass *RC,
                                SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
@@ -655,7 +655,7 @@ void PPCInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
 
 /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
 /// copy instructions, turning them into load/store instructions.
-MachineInstr *PPCInstrInfo::foldMemoryOperand(MachineFunction &MF,
+MachineInstr *PPCInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                               MachineInstr *MI,
                                               const SmallVectorImpl<unsigned> &Ops,
                                               int FrameIndex) const {
@@ -142,12 +142,12 @@ public:
 
   /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
   /// copy instructions, turning them into load/store instructions.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
@@ -225,7 +225,7 @@ void SparcInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
   return;
 }
 
-MachineInstr *SparcInstrInfo::foldMemoryOperand(MachineFunction &MF,
+MachineInstr *SparcInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                 MachineInstr* MI,
                                                 const SmallVectorImpl<unsigned> &Ops,
                                                 int FI) const {
@@ -96,12 +96,12 @@ public:
                                const TargetRegisterClass *RC,
                                SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
@@ -1976,7 +1976,7 @@ static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
 }
 
 MachineInstr*
-X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
+X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                 MachineInstr *MI, unsigned i,
                                 const SmallVector<MachineOperand,4> &MOs) const{
   const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
@@ -2035,7 +2035,7 @@ X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
 }
 
 
-MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
+MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                               MachineInstr *MI,
                                               const SmallVectorImpl<unsigned> &Ops,
                                               int FrameIndex) const {
@@ -2079,10 +2079,10 @@ MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
 
   SmallVector<MachineOperand,4> MOs;
   MOs.push_back(MachineOperand::CreateFI(FrameIndex));
-  return foldMemoryOperand(MF, MI, Ops[0], MOs);
+  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs);
 }
 
-MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
+MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                               MachineInstr *MI,
                                               const SmallVectorImpl<unsigned> &Ops,
                                               MachineInstr *LoadMI) const {
@@ -2158,7 +2158,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
     for (unsigned i = NumOps - 4; i != NumOps; ++i)
       MOs.push_back(LoadMI->getOperand(i));
   }
-  return foldMemoryOperand(MF, MI, Ops[0], MOs);
+  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs);
 }
 
 
@@ -368,7 +368,7 @@ public:
   /// folding and return true, otherwise it should return false. If it folds
   /// the instruction, it is likely that the MachineInstruction the iterator
   /// references has been changed.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const;
@@ -376,7 +376,7 @@ public:
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const;
@@ -444,7 +444,7 @@ public:
   unsigned getGlobalBaseReg(MachineFunction *MF) const;
 
 private:
-  MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                   MachineInstr* MI,
                                   unsigned OpNum,
                                   const SmallVector<MachineOperand,4> &MOs) const;