Chain dependencies used to enforce memory order should have a latency of 0 (except for the true dependency of a Store followed by an aliased Load... we estimate that case with a single cycle of latency, assuming the hardware will bypass)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@85807 91177308-0d34-0410-b5e6-96231b3b80d8
David Goodwin 2009-11-02 17:06:28 +00:00
parent 28989a8ddc
commit 7c9b1ac507

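The latency rule in this message can be sketched in isolation. The helper below and its name chainEdgeLatency are hypothetical illustrations of the stated policy, not LLVM API:

// Hypothetical sketch of the policy: memory-order (chain) edges carry no
// latency, except a store followed by a load that may alias it, which is
// estimated at one cycle on the assumption that hardware bypasses the
// stored value to the load.
#include <iostream>

static unsigned chainEdgeLatency(bool storeFollowedByAliasedLoad) {
  return storeFollowedByAliasedLoad ? 1u : 0u;
}

int main() {
  std::cout << "store -> aliased load edge: " << chainEdgeLatency(true)
            << " cycle\n";
  std::cout << "any other chain edge: " << chainEdgeLatency(false)
            << " cycles\n";
}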

@@ -317,29 +317,35 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
     }
 
     // Add chain dependencies.
+    // Chain dependencies used to enforce memory order should have
+    // latency of 0 (except for true dependency of Store followed by
+    // aliased Load... we estimate that with a single cycle of latency
+    // assuming the hardware will bypass)
     // Note that isStoreToStackSlot and isLoadFromStackSLot are not usable
     // after stack slots are lowered to actual addresses.
     // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
     // produce more precise dependence information.
+#define STORE_LOAD_LATENCY 1
+    unsigned TrueMemOrderLatency = 0;
     if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
     new_chain:
       // This is the conservative case. Add dependencies on all memory
       // references.
       if (Chain)
-        Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
+        Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
       Chain = SU;
       for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
-        PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
+        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
       PendingLoads.clear();
       for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
            E = MemDefs.end(); I != E; ++I) {
-        I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
+        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
         I->second = SU;
       }
       for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
              MemUses.begin(), E = MemUses.end(); I != E; ++I) {
         for (unsigned i = 0, e = I->second.size(); i != e; ++i)
-          I->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency));
+          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
         I->second.clear();
       }
       // See if it is known to just have a single memory reference.
@@ -356,12 +362,13 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
         // Unknown memory accesses. Assume the worst.
         ChainMMO = 0;
     } else if (TID.mayStore()) {
+      TrueMemOrderLatency = STORE_LOAD_LATENCY;
       if (const Value *V = getUnderlyingObjectForInstr(MI, MFI)) {
         // A store to a specific PseudoSourceValue. Add precise dependencies.
         // Handle the def in MemDefs, if there is one.
         std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
         if (I != MemDefs.end()) {
-          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
+          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                   /*isNormalMemory=*/true));
           I->second = SU;
         } else {
@@ -372,35 +379,37 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
           MemUses.find(V);
         if (J != MemUses.end()) {
           for (unsigned i = 0, e = J->second.size(); i != e; ++i)
-            J->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
-                                       /*isNormalMemory=*/true));
+            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
+                                       /*Reg=*/0, /*isNormalMemory=*/true));
           J->second.clear();
         }
         // Add dependencies from all the PendingLoads, since without
         // memoperands we must assume they alias anything.
         for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
-          PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
+          PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
         // Add a general dependence too, if needed.
         if (Chain)
-          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
-      } else
+          Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+      } else {
         // Treat all other stores conservatively.
         goto new_chain;
+      }
     } else if (TID.mayLoad()) {
+      TrueMemOrderLatency = 0;
       if (MI->isInvariantLoad(AA)) {
         // Invariant load, no chain dependencies needed!
       } else if (const Value *V = getUnderlyingObjectForInstr(MI, MFI)) {
         // A load from a specific PseudoSourceValue. Add precise dependencies.
         std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
         if (I != MemDefs.end())
-          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
+          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                   /*isNormalMemory=*/true));
         MemUses[V].push_back(SU);
 
         // Add a general dependence too, if needed.
         if (Chain && (!ChainMMO ||
                       (ChainMMO->isStore() || ChainMMO->isVolatile())))
-          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
+          Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
       } else if (MI->hasVolatileMemoryRef()) {
         // Treat volatile loads conservatively. Note that this includes
         // cases where memoperand information is unavailable.
@@ -411,10 +420,10 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
         // we can't even assume that the load doesn't alias well-behaved
         // memory locations.
         if (Chain)
-          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
+          Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
         for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
              E = MemDefs.end(); I != E; ++I)
-          I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
+          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
         PendingLoads.push_back(SU);
       }
     }
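To see the resulting edge pattern outside the surrounding file, here is a minimal self-contained mock. SUnit and SDep below are simplified stand-ins for the real llvm::SUnit and llvm::SDep classes (which carry far more state); only the order-edge latency policy of this patch is modeled:

#include <cstdio>
#include <vector>

// Simplified stand-ins for llvm::SUnit and llvm::SDep, modeling only
// memory-order (chain) edges and their latencies.
struct SUnit;

struct SDep {
  SUnit *Source;    // the unit this edge depends on
  unsigned Latency; // cycles the dependent unit must wait
};

struct SUnit {
  const char *Name;
  std::vector<SDep> Preds; // incoming dependence edges
  void addPred(const SDep &D) { Preds.push_back(D); }
};

int main() {
  SUnit Store{"store", {}};
  SUnit Load{"load", {}};
  SUnit Call{"call", {}};

  // Mirrors STORE_LOAD_LATENCY in the patch: a store followed by a load
  // that may alias it costs one cycle, assuming a hardware bypass.
  const unsigned TrueMemOrderLatency = 1;
  Load.addPred(SDep{&Store, TrueMemOrderLatency});

  // Every other memory-order edge carries no latency; it exists only to
  // constrain the scheduler's ordering.
  Call.addPred(SDep{&Store, /*Latency=*/0});

  for (const SDep &D : Load.Preds)
    std::printf("load <- %s (latency %u)\n", D.Source->Name, D.Latency);
  for (const SDep &D : Call.Preds)
    std::printf("call <- %s (latency %u)\n", D.Source->Name, D.Latency);
  return 0;
}

The design point of the patch is visible here: a chain edge exists only to keep memory operations in order, so charging it the producer's full SU->Latency (the old behavior) needlessly lengthened the critical path, while the single store-to-aliased-load cycle survives as a bypass estimate.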