mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2025-04-01 18:33:56 +00:00
[TargetInstrInfo] Rename getLdStBaseRegImmOfs and implement for x86.
Summary:
Rename TargetInstrInfo::getLdStBaseRegImmOfs to
TargetInstrInfo::getMemOpBaseRegImmOfs and implement it for x86. The
implementation handles only a few easy cases for now and will be made
more sophisticated in the future.

This is NFCI: the only user of `getLdStBaseRegImmOfs` (now
`getMemOpBaseRegImmOfs`) is `LoadClusterMutation`, and
`LoadClusterMutation` is disabled for x86.

Reviewers: reames, ab, MatzeB, atrick

Reviewed By: MatzeB, atrick

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D10199

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@239741 91177308-0d34-0410-b5e6-96231b3b80d8
parent 1991e2a4df
commit 319c91bbb0
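The renamed hook's contract: it now covers any instruction that reads or writes memory, and a target returns false whenever the addressing mode cannot be summarized as a base register plus an immediate byte offset. A minimal standalone sketch of that contract, using hypothetical stand-in types rather than the real LLVM classes:

```cpp
#include <cstdio>

struct MachineInstrModel {};  // hypothetical stand-in for llvm::MachineInstr

struct TargetInstrInfoModel {
  virtual ~TargetInstrInfoModel() = default;

  // Mirrors the new default in TargetInstrInfo below: a target that cannot
  // decompose the memory operand into base + immediate offset reports failure.
  virtual bool getMemOpBaseRegImmOfs(const MachineInstrModel *MemOp,
                                     unsigned &BaseReg,
                                     unsigned &Offset) const {
    return false;
  }
};

int main() {
  TargetInstrInfoModel TII;
  MachineInstrModel MI;
  unsigned BaseReg = 0, Offset = 0;
  // The generic default declines; only targets that override the hook
  // (AArch64, SI, and now x86 in this commit) ever return true.
  std::printf("decomposed: %d\n",
              TII.getMemOpBaseRegImmOfs(&MI, BaseReg, Offset));
  return 0;
}
```

Callers probe instructions through this hook and simply skip anything the target declines to describe, as the load-clustering code in the next hunk shows.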
@@ -827,10 +827,11 @@ public:
     return false;
   }
 
-  /// Get the base register and byte offset of a load/store instr.
-  virtual bool getLdStBaseRegImmOfs(MachineInstr *LdSt,
-                                    unsigned &BaseReg, unsigned &Offset,
-                                    const TargetRegisterInfo *TRI) const {
+  /// Get the base register and byte offset of an instruction that reads/writes
+  /// memory.
+  virtual bool getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
+                                     unsigned &Offset,
+                                     const TargetRegisterInfo *TRI) const {
     return false;
   }
 
@@ -1271,7 +1271,7 @@ void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
     SUnit *SU = Loads[Idx];
     unsigned BaseReg;
     unsigned Offset;
-    if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
+    if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
       LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
   }
   if (LoadRecords.size() < 2)
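For context, a rough standalone model of what the surrounding function does with the (BaseReg, Offset) pairs the hook reports: bucket the loads by base register, sort each bucket by offset, and treat neighboring entries as clustering candidates. The types here are hypothetical stand-ins, not the scheduler's SUnit/LoadInfo:

```cpp
#include <algorithm>
#include <cstdio>
#include <map>
#include <vector>

struct LoadInfoModel {
  int SUnitID;      // stands in for the scheduler's SUnit*
  unsigned BaseReg;
  unsigned Offset;
};

static void clusterNeighboringLoads(std::vector<LoadInfoModel> Loads) {
  std::map<unsigned, std::vector<LoadInfoModel>> ByBase;
  for (const LoadInfoModel &LI : Loads)
    ByBase[LI.BaseReg].push_back(LI); // only loads the hook could decompose
  for (auto &Entry : ByBase) {
    auto &Group = Entry.second;
    if (Group.size() < 2)             // mirrors the size() < 2 early-out above
      continue;
    std::sort(Group.begin(), Group.end(),
              [](const LoadInfoModel &A, const LoadInfoModel &B) {
                return A.Offset < B.Offset;
              });
    for (std::size_t I = 0; I + 1 < Group.size(); ++I)
      std::printf("cluster SU%d with SU%d\n", Group[I].SUnitID,
                  Group[I + 1].SUnitID);
  }
}

int main() {
  clusterNeighboringLoads({{0, 1, 8}, {1, 1, 0}, {2, 2, 4}});
  return 0;
}
```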
@@ -629,8 +629,8 @@ AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
   // base registers are identical, and the offset of a lower memory access +
   // the width doesn't overlap the offset of a higher memory access,
   // then the memory accesses are different.
-  if (getLdStBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
-      getLdStBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
+  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
+      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
     if (BaseRegA == BaseRegB) {
       int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
       int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
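The logic guarded by this rename is a plain interval-disjointness check: when the base registers are identical, the accesses cannot alias if the lower access ends at or before the higher one begins. A self-contained sketch of that rule (a free function with plain ints, not the AArch64 method):

```cpp
#include <cassert>

// Disjointness rule modeled standalone: same base register, and the lower
// access ends at or before the higher access begins.
static bool accessesAreDisjoint(unsigned BaseRegA, int OffsetA, int WidthA,
                                unsigned BaseRegB, int OffsetB, int WidthB) {
  if (BaseRegA != BaseRegB)
    return false; // different bases: nothing can be proven this way
  int LowOffset  = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int LowWidth   = OffsetA < OffsetB ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}

int main() {
  assert(accessesAreDisjoint(1, 0, 8, 1, 8, 8));  // [0,8) vs [8,16): disjoint
  assert(!accessesAreDisjoint(1, 0, 8, 1, 4, 8)); // [0,8) vs [4,12): overlap
  return 0;
}
```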
@@ -1310,9 +1310,9 @@ void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
 }
 
 bool
-AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
-                                       unsigned &Offset,
-                                       const TargetRegisterInfo *TRI) const {
+AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+                                        unsigned &Offset,
+                                        const TargetRegisterInfo *TRI) const {
   switch (LdSt->getOpcode()) {
   default:
     return false;
@@ -1336,7 +1336,7 @@ AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
   };
 }
 
-bool AArch64InstrInfo::getLdStBaseRegImmOfsWidth(
+bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
     MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
     const TargetRegisterInfo *TRI) const {
   // Handle only loads/stores with base register followed by immediate offset.
@@ -1434,7 +1434,7 @@ bool AArch64InstrInfo::getLdStBaseRegImmOfsWidth(
 
 /// Detect opportunities for ldp/stp formation.
 ///
-/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
+/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
 bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                           MachineInstr *SecondLdSt,
                                           unsigned NumLoads) const {
@@ -1443,7 +1443,7 @@ bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
     return false;
   if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
     return false;
-  // getLdStBaseRegImmOfs guarantees that oper 2 isImm.
+  // getMemOpBaseRegImmOfs guarantees that oper 2 isImm.
   unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
   // Allow 6 bits of positive range.
   if (Ofs1 > 64)
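On the `Ofs1 > 64` guard: ldp/stp encode a signed 7-bit scaled immediate, so the pass keeps the first offset within roughly the positive half of that range before pairing; per the surrounding function, adjacency is then checked as `Ofs1 + 1 == Ofs2` in scaled units. A standalone sketch under those assumptions (hypothetical helper, not the AArch64 API):

```cpp
#include <cassert>
#include <cstdint>

// Hedged model: pairs two same-opcode loads only when the first scaled
// offset stays in a small positive window and the second is exactly adjacent.
static bool mayPairScaledOffsets(std::int64_t Ofs1, std::int64_t Ofs2) {
  if (Ofs1 > 64)           // allow 6 bits of positive range, as above
    return false;
  return Ofs1 + 1 == Ofs2; // adjacent scaled offsets form an ldp/stp candidate
}

int main() {
  assert(mayPairScaledOffsets(4, 5));      // adjacent and in range: pair
  assert(!mayPairScaledOffsets(100, 101)); // outside the positive window
  assert(!mayPairScaledOffsets(4, 6));     // not adjacent: reject
  return 0;
}
```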
@@ -90,13 +90,13 @@ public:
   /// Hint that pairing the given load or store is unprofitable.
   void suppressLdStPair(MachineInstr *MI) const;
 
-  bool getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
-                            unsigned &Offset,
-                            const TargetRegisterInfo *TRI) const override;
+  bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+                             unsigned &Offset,
+                             const TargetRegisterInfo *TRI) const override;
 
-  bool getLdStBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
-                                 int &Offset, int &Width,
-                                 const TargetRegisterInfo *TRI) const;
+  bool getMemOpBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
+                                  int &Offset, int &Width,
+                                  const TargetRegisterInfo *TRI) const;
 
   bool enableClusterLoads() const override { return true; }
 
@@ -142,7 +142,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
         continue;
       unsigned BaseReg;
       unsigned Offset;
-      if (TII->getLdStBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
+      if (TII->getMemOpBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
         if (PrevBaseReg == BaseReg) {
           // If this block can take STPs, skip ahead to the next block.
           if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
@@ -200,9 +200,9 @@ static bool isStride64(unsigned Opc) {
   }
 }
 
-bool SIInstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt,
-                                       unsigned &BaseReg, unsigned &Offset,
-                                       const TargetRegisterInfo *TRI) const {
+bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+                                        unsigned &Offset,
+                                        const TargetRegisterInfo *TRI) const {
   unsigned Opc = LdSt->getOpcode();
   if (isDS(Opc)) {
     const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
@@ -1053,8 +1053,8 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
   unsigned BaseReg0, Offset0;
   unsigned BaseReg1, Offset1;
 
-  if (getLdStBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
-      getLdStBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
+  if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
+      getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
     assert(MIa->hasOneMemOperand() && MIb->hasOneMemOperand() &&
            "read2 / write2 not expected here yet");
     unsigned Width0 = (*MIa->memoperands_begin())->getSize();
@@ -79,9 +79,9 @@ public:
                                int64_t &Offset1,
                                int64_t &Offset2) const override;
 
-  bool getLdStBaseRegImmOfs(MachineInstr *LdSt,
-                            unsigned &BaseReg, unsigned &Offset,
-                            const TargetRegisterInfo *TRI) const final;
+  bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+                             unsigned &Offset,
+                             const TargetRegisterInfo *TRI) const final;
 
   bool shouldClusterLoads(MachineInstr *FirstLdSt,
                           MachineInstr *SecondLdSt,
@@ -3965,6 +3965,36 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
   }
 }
 
+bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
+                                         unsigned &Offset,
+                                         const TargetRegisterInfo *TRI) const {
+  const MCInstrDesc &Desc = MemOp->getDesc();
+  int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags, MemOp->getOpcode());
+  if (MemRefBegin < 0)
+    return false;
+
+  MemRefBegin += X86II::getOperandBias(Desc);
+
+  BaseReg = MemOp->getOperand(MemRefBegin + X86::AddrBaseReg).getReg();
+  if (MemOp->getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
+    return false;
+
+  if (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
+      X86::NoRegister)
+    return false;
+
+  const MachineOperand &DispMO = MemOp->getOperand(MemRefBegin + X86::AddrDisp);
+
+  // Displacement can be symbolic
+  if (!DispMO.isImm())
+    return false;
+
+  Offset = DispMO.getImm();
+
+  return (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() ==
+          X86::NoRegister);
+}
+
 static unsigned getStoreRegOpcode(unsigned SrcReg,
                                   const TargetRegisterClass *RC,
                                   bool isStackAligned,
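The early-outs in the new x86 implementation track the shape of an x86 memory operand, Base + Scale*Index + Disp: only the degenerate form with a unit scale, no index register, and an immediate displacement is expressible as base + offset. A self-contained model of the accepted cases (hypothetical operand struct, not the backend's real operand layout):

```cpp
#include <cassert>
#include <optional>
#include <utility>

struct X86MemOperandModel {
  unsigned BaseReg;
  int ScaleAmt;
  unsigned IndexReg; // 0 plays the role of X86::NoRegister here
  bool DispIsImm;    // false models a symbolic displacement
  long long Disp;
};

// Returns (BaseReg, Offset) only for the simple base+displacement form.
static std::optional<std::pair<unsigned, long long>>
getBaseRegImmOfs(const X86MemOperandModel &MO) {
  if (MO.ScaleAmt != 1)
    return std::nullopt; // scaled addressing is not plain base+offset
  if (MO.IndexReg != 0)
    return std::nullopt; // an index register contributes a runtime term
  if (!MO.DispIsImm)
    return std::nullopt; // displacement can be symbolic
  return std::make_pair(MO.BaseReg, MO.Disp);
}

int main() {
  assert(getBaseRegImmOfs({5, 1, 0, true, 16}));  // [reg + 16] style: accepted
  assert(!getBaseRegImmOfs({5, 4, 3, true, 0}));  // [reg + 4*reg]: rejected
  assert(!getBaseRegImmOfs({5, 1, 0, false, 0})); // symbolic disp: rejected
  return 0;
}
```

Anything rejected here simply yields `return false` in the real hook, which is safe: callers treat an undecomposable operand as unclusterable.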
@@ -267,6 +267,10 @@ public:
                      MachineBasicBlock *&FBB,
                      SmallVectorImpl<MachineOperand> &Cond,
                      bool AllowModify) const override;
+
+  bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+                             unsigned &Offset,
+                             const TargetRegisterInfo *TRI) const override;
   unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
   unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                         MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,