Mirror of https://github.com/c64scene-ar/llvm-6502.git
Synced 2024-11-12 15:05:06 +00:00
Add two target hooks to determine whether two loads are near and should be scheduled together.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@94147 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent cf220d5b89
commit 96dc115ef3
@@ -486,6 +486,30 @@ public:
                                        unsigned *LoadRegIndex = 0) const {
    return 0;
  }

  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
  /// to determine if two loads are loading from the same base address. It
  /// should only return true if the base pointers are the same and the
  /// only difference between the two addresses is the offset. It also
  /// returns the offsets by reference.
  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                       int64_t &Offset1, int64_t &Offset2) const {
    return false;
  }

  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads
  /// should be scheduled together. On some targets, if two loads are loading
  /// from addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load
  /// offsets from the common base address. It returns true if it decides it's
  /// desirable to schedule the two loads together. "NumLoads" is the number
  /// of loads that have already been scheduled after Load1.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const {
    return false;
  }

  /// ReverseBranchCondition - Reverses the branch condition of the specified
  /// condition list, returning false on success and true if it cannot be
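For orientation, here is a minimal sketch of how some other backend might override the two new hooks. The "Foo" target, its Foo::LDRri opcode, and the assumed machine-node operand order (0 = base register, 1 = immediate displacement, 2 = chain) are illustrative assumptions, not part of this patch:

bool FooInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                           int64_t &Offset1,
                                           int64_t &Offset2) const {
  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;
  // Only pair the simple reg+imm load form (hypothetical opcode).
  if (Load1->getMachineOpcode() != Foo::LDRri ||
      Load2->getMachineOpcode() != Foo::LDRri)
    return false;
  // Base register and chain must match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(2) != Load2->getOperand(2))
    return false;
  // Both displacements must be constants.
  ConstantSDNode *D1 = dyn_cast<ConstantSDNode>(Load1->getOperand(1));
  ConstantSDNode *D2 = dyn_cast<ConstantSDNode>(Load2->getOperand(1));
  if (!D1 || !D2)
    return false;
  Offset1 = D1->getSExtValue();
  Offset2 = D2->getSExtValue();
  return true;
}

bool FooInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                           int64_t Offset1, int64_t Offset2,
                                           unsigned NumLoads) const {
  // Cluster at most two loads, and only if both displacements can fall
  // within one 64-byte cache line.
  return NumLoads == 0 && Offset2 - Offset1 < 64;
}

The X86 implementation below follows the same shape: filter by opcode, prove the addresses share a base, then extract the constant displacements.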
@@ -2868,6 +2868,136 @@ unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
  return I->second.first;
}

bool
X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                      int64_t &Offset1, int64_t &Offset2) const {
  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;
  unsigned Opc1 = Load1->getMachineOpcode();
  unsigned Opc2 = Load2->getMachineOpcode();
  switch (Opc1) {
  default: return false;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::FsMOVAPSrm:
  case X86::FsMOVAPDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVUPSrm_Int:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::MOVDQUrm_Int:
    break;
  }
  switch (Opc2) {
  default: return false;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::FsMOVAPSrm:
  case X86::FsMOVAPDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVUPSrm_Int:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MOVDQUrm:
  case X86::MOVDQUrm_Int:
    break;
  }

  // Machine load node operands here are: 0 = base, 1 = scale, 2 = index,
  // 3 = displacement, 4 = segment, 5 = chain.
  // Check if chain operands and base addresses match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(5) != Load2->getOperand(5))
    return false;
  // Segment operands should match as well.
  if (Load1->getOperand(4) != Load2->getOperand(4))
    return false;
  // Scale should be 1, Index should be Reg0.
  if (Load1->getOperand(1) == Load2->getOperand(1) &&
      Load1->getOperand(2) == Load2->getOperand(2)) {
    if (cast<ConstantSDNode>(Load1->getOperand(1))->getZExtValue() != 1)
      return false;
    SDValue Op2 = Load1->getOperand(2);
    if (!isa<RegisterSDNode>(Op2) ||
        cast<RegisterSDNode>(Op2)->getReg() != 0)
      return false;

    // Now let's examine the displacements.
    if (isa<ConstantSDNode>(Load1->getOperand(3)) &&
        isa<ConstantSDNode>(Load2->getOperand(3))) {
      Offset1 = cast<ConstantSDNode>(Load1->getOperand(3))->getSExtValue();
      Offset2 = cast<ConstantSDNode>(Load2->getOperand(3))->getSExtValue();
      return true;
    }
  }
  return false;
}
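The two hooks are meant to be queried in tandem by the scheduler. A minimal caller-side sketch, assuming TII points at the target's TargetInstrInfo and NumClustered counts loads already placed after Load1 (both names are illustrative, not part of this patch):

int64_t Offset1, Offset2;
if (TII->areLoadsFromSameBasePtr(Load1, Load2, Offset1, Offset2) &&
    Offset1 < Offset2 &&  // shouldScheduleLoadsNear asserts Offset2 > Offset1
    TII->shouldScheduleLoadsNear(Load1, Load2, Offset1, Offset2,
                                 NumClustered)) {
  // Keep Load2 adjacent to Load1 in the schedule.
  ++NumClustered;
}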
bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                           int64_t Offset1, int64_t Offset2,
                                           unsigned NumLoads) const {
  assert(Offset2 > Offset1);
  // With integer division, this rejects pairs whose displacements are
  // 520 or more bytes apart.
  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  unsigned Opc1 = Load1->getMachineOpcode();
  unsigned Opc2 = Load2->getMachineOpcode();
  if (Opc1 != Opc2)
    return false;  // FIXME: overly conservative?

  switch (Opc1) {
  default: break;
  case X86::LD_Fp32m:
  case X86::LD_Fp64m:
  case X86::LD_Fp80m:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    return false;
  }

  EVT VT = Load1->getValueType(0);
  switch (VT.getSimpleVT().SimpleTy) {
  default: {
    // XMM registers. In 64-bit mode we can be a bit more aggressive since we
    // have 16 of them to play with.
    if (TM.getSubtargetImpl()->is64Bit()) {
      if (NumLoads >= 3)
        return false;
    } else if (NumLoads)
      return false;
    break;
  }
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    if (NumLoads)
      return false;
  }

  return true;
}
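Net effect of the X86 heuristic: GPR loads (i8 through i64) cluster only in pairs (NumLoads must be 0), while XMM loads in 64-bit mode are refused only once NumLoads reaches 3 (and in 32-bit mode once it is nonzero). The distance filter uses integer division, so (Offset2 - Offset1) / 8 > 64 first rejects at a gap of 520 bytes: offsets 0 and 520 give 65 > 64 (rejected), while offsets 0 and 512 give 64 (accepted).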

bool X86InstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
@@ -610,6 +610,26 @@ public:
                                      bool UnfoldLoad, bool UnfoldStore,
                                      unsigned *LoadRegIndex = 0) const;

  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
  /// to determine if two loads are loading from the same base address. It
  /// should only return true if the base pointers are the same and the
  /// only difference between the two addresses is the offset. It also
  /// returns the offsets by reference.
  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                       int64_t &Offset1, int64_t &Offset2) const;

  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads
  /// should be scheduled together. On some targets, if two loads are loading
  /// from addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load
  /// offsets from the common base address. It returns true if it decides it's
  /// desirable to schedule the two loads together. "NumLoads" is the number
  /// of loads that have already been scheduled after Load1.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const;

  virtual
  bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;