MISched: Heuristics, compare latency more precisely. It matters more for some targets.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@170452 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Andrew Trick
Date: 2012-12-18 20:52:56 +00:00
Commit: 44fd0bcc40
Parent: 9c676c2941
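In short: instead of flagging ILP per released node, each scheduling zone now takes the largest unscheduled latency across its Available and Pending queues and requests latency reduction only when that latency, added to the latency already covered, reaches the region's critical path plus the ILP window and also exceeds the remaining resource pressure. A minimal standalone sketch of that test (not LLVM code; every name and number below is illustrative):

    // Sketch of the new latency test; not LLVM code, values are made up.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Largest unscheduled latency among a set of nodes (values precomputed).
    static unsigned maxUnscheduledLatency(const std::vector<unsigned> &Nodes) {
      unsigned Max = 0;
      for (unsigned L : Nodes)
        Max = std::max(Max, L);
      return Max;
    }

    int main() {
      std::vector<unsigned> Available = {4, 7, 2}; // ready nodes
      std::vector<unsigned> Pending = {9, 3};      // released but not yet ready
      unsigned ExpectedLatency = 10;  // latency already covered by the zone
      unsigned CriticalPath = 16;     // region critical path
      unsigned ILPWindow = 2;         // slack allowed before latency matters
      unsigned MaxRemainingCount = 6; // remaining critical resource count

      unsigned RemLatency = std::max(maxUnscheduledLatency(Available),
                                     maxUnscheduledLatency(Pending));
      // Mirror of the policy test: reduce latency only when the covered plus
      // remaining latency reaches the critical path plus the ILP window and
      // the remaining latency also dominates the resource pressure.
      bool ReduceLatency =
          RemLatency + ExpectedLatency >= CriticalPath + ILPWindow &&
          RemLatency > MaxRemainingCount;
      std::printf("RemLatency=%u ReduceLatency=%d\n", RemLatency, ReduceLatency);
      return 0;
    }

With these sample numbers RemLatency is 9, so 19 >= 18 and 9 > 6 both hold and ReduceLatency comes out true.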


@@ -954,19 +954,25 @@ public:
     // Number of micro-ops left to schedule.
     unsigned RemainingMicroOps;
-    unsigned MaxRemainingCount;
     void reset() {
       CriticalPath = 0;
       RemainingCounts.clear();
       CritResIdx = 0;
       RemainingMicroOps = 0;
-      MaxRemainingCount = 0;
     }
     SchedRemainder() { reset(); }
     void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
+    unsigned getMaxRemainingCount(const TargetSchedModel *SchedModel) const {
+      if (!SchedModel->hasInstrSchedModel())
+        return 0;
+      return std::max(
+        RemainingMicroOps * SchedModel->getMicroOpFactor(),
+        RemainingCounts[CritResIdx]);
+    }
   };
   /// Each Scheduling boundary is associated with ready queues. It tracks the
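The hunk above trades the cached MaxRemainingCount field for an on-demand helper. The computation is a unit normalization: the remaining micro-op count is scaled by the micro-op factor so it becomes comparable with the pre-scaled per-resource counts, and the larger of the two is returned. A toy version with invented factor values (not LLVM code):

    // Toy model of getMaxRemainingCount(); not LLVM code.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct ToyRemainder {
      unsigned RemainingMicroOps = 0;
      std::vector<unsigned> RemainingCounts; // per-resource, already scaled
      unsigned CritResIdx = 0;

      // Larger of total issue pressure (micro-ops scaled into resource units)
      // and the remaining count of the critical resource.
      unsigned getMaxRemainingCount(unsigned MicroOpFactor,
                                    bool HasInstrSchedModel) const {
        if (!HasInstrSchedModel)
          return 0;
        return std::max(RemainingMicroOps * MicroOpFactor,
                        RemainingCounts[CritResIdx]);
      }
    };

    int main() {
      ToyRemainder Rem;
      Rem.RemainingMicroOps = 12;
      Rem.RemainingCounts = {30, 18}; // e.g. ALU, load/store (illustrative)
      Rem.CritResIdx = 0;
      std::printf("max remaining count = %u\n",
                  Rem.getMaxRemainingCount(/*MicroOpFactor=*/2, true));
      return 0;
    }

Here 12 micro-ops scale to 24 units, so the critical resource count of 30 wins.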
@@ -1007,9 +1013,6 @@ public:
     unsigned ExpectedCount;
-    // Policy flag: attempt to find ILP until expected latency is covered.
-    bool ShouldIncreaseILP;
 #ifndef NDEBUG
     // Remember the greatest min operand latency.
     unsigned MaxMinLatency;
@@ -1030,7 +1033,6 @@ public:
       CritResIdx = 0;
       IsResourceLimited = false;
       ExpectedCount = 0;
-      ShouldIncreaseILP = false;
 #ifndef NDEBUG
       MaxMinLatency = 0;
 #endif
@@ -1058,7 +1060,7 @@ public:
     unsigned getUnscheduledLatency(SUnit *SU) const {
       if (isTop())
         return SU->getHeight();
-      return SU->getDepth();
+      return SU->getDepth() + SU->Latency;
     }
     unsigned getCriticalCount() const {
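This is the precision fix named in the commit title: for the bottom-up zone, a node's unscheduled latency now includes the node's own latency on top of its depth, rather than the depth alone. A toy illustration (not LLVM code; a plain struct stands in for SUnit and the numbers are made up):

    // Toy model of the revised getUnscheduledLatency(); not LLVM code.
    #include <cstdio>

    struct ToyNode {
      unsigned Height;  // longest latency path to an exit (top-down view)
      unsigned Depth;   // longest latency path from an entry (bottom-up view)
      unsigned Latency; // the node's own latency
    };

    // Bottom-up now counts the node's own latency as still unscheduled.
    unsigned getUnscheduledLatency(const ToyNode &SU, bool IsTop) {
      if (IsTop)
        return SU.Height;
      return SU.Depth + SU.Latency;
    }

    int main() {
      ToyNode SU = {/*Height=*/6, /*Depth=*/4, /*Latency=*/3};
      std::printf("top: %u, bottom: %u\n",
                  getUnscheduledLatency(SU, true),   // 6
                  getUnscheduledLatency(SU, false)); // 4 + 3 = 7
      return 0;
    }

For the bottom zone the old code would have reported 4 cycles; the new code reports 7.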
@@ -1067,7 +1069,7 @@ public:
     bool checkHazard(SUnit *SU);
-    void checkILPPolicy();
+    void setLatencyPolicy(CandPolicy &Policy);
     void releaseNode(SUnit *SU, unsigned ReadyCycle);
@@ -1170,9 +1172,6 @@ init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
       CritResIdx = PIdx;
     }
   }
-  MaxRemainingCount = std::max(
-    RemainingMicroOps * SchedModel->getMicroOpFactor(),
-    RemainingCounts[CritResIdx]);
 }
 void ConvergingScheduler::SchedBoundary::
@@ -1281,12 +1280,27 @@ bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) {
   return false;
 }
-/// If expected latency is covered, disable ILP policy.
-void ConvergingScheduler::SchedBoundary::checkILPPolicy() {
-  if (ShouldIncreaseILP
-      && (IsResourceLimited || ExpectedLatency <= CurrCycle)) {
-    ShouldIncreaseILP = false;
-    DEBUG(dbgs() << "Disable ILP: " << Available.getName() << '\n');
+/// Compute the remaining latency to determine whether ILP should be increased.
+void ConvergingScheduler::SchedBoundary::setLatencyPolicy(CandPolicy &Policy) {
+  // FIXME: compile time. In all, we visit four queues here one we should only
+  // need to visit the one that was last popped if we cache the result.
+  unsigned RemLatency = 0;
+  for (ReadyQueue::iterator I = Available.begin(), E = Available.end();
+       I != E; ++I) {
+    unsigned L = getUnscheduledLatency(*I);
+    if (L > RemLatency)
+      RemLatency = L;
+  }
+  for (ReadyQueue::iterator I = Pending.begin(), E = Pending.end();
+       I != E; ++I) {
+    unsigned L = getUnscheduledLatency(*I);
+    if (L > RemLatency)
+      RemLatency = L;
+  }
+  if (RemLatency + ExpectedLatency >= Rem->CriticalPath + ILPWindow
+      && RemLatency > Rem->getMaxRemainingCount(SchedModel)) {
+    Policy.ReduceLatency = true;
+    DEBUG(dbgs() << "Increase ILP: " << Available.getName() << '\n');
   }
 }
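The FIXME above flags the compile-time cost of rescanning both queues on every query. A rough sketch of the caching direction it hints at, keeping a running maximum as nodes are released instead of revisiting every queue (not LLVM code; note that a plain running maximum cannot drop once its node has been scheduled, which is part of why the code still rescans):

    // Sketch of an incremental cache for the remaining-latency maximum;
    // not LLVM code, and deliberately ignores the shrink-on-schedule problem.
    #include <algorithm>
    #include <cstdio>

    struct LatencyCache {
      unsigned MaxRemLatency = 0;
      // Called once per released node with its unscheduled latency.
      void noteReleasedNode(unsigned UnscheduledLatency) {
        MaxRemLatency = std::max(MaxRemLatency, UnscheduledLatency);
      }
      unsigned get() const { return MaxRemLatency; }
    };

    int main() {
      LatencyCache Cache;
      Cache.noteReleasedNode(4); // illustrative per-node latencies
      Cache.noteReleasedNode(9);
      Cache.noteReleasedNode(2);
      std::printf("cached RemLatency = %u\n", Cache.get()); // 9
      return 0;
    }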
@@ -1305,15 +1319,6 @@ void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU,
   // Record this node as an immediate dependent of the scheduled node.
   NextSUs.insert(SU);
-  // If CriticalPath has been computed, then check if the unscheduled nodes
-  // exceed the ILP window. Before registerRoots, CriticalPath==0.
-  if (Rem->CriticalPath && (ExpectedLatency + getUnscheduledLatency(SU)
-                            > Rem->CriticalPath + ILPWindow)) {
-    ShouldIncreaseILP = true;
-    DEBUG(dbgs() << "Increase ILP: " << Available.getName() << " "
-          << ExpectedLatency << " + " << getUnscheduledLatency(SU) << '\n');
-  }
 }
 /// Move the boundary of scheduled code by one cycle.
@@ -1361,9 +1366,6 @@ void ConvergingScheduler::SchedBoundary::countResource(unsigned PIdx,
   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
   Rem->RemainingCounts[PIdx] -= Count;
-  // Reset MaxRemainingCount for sanity.
-  Rem->MaxRemainingCount = 0;
   // Check if this resource exceeds the current critical resource by a full
   // cycle. If so, it becomes the critical resource.
   if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx])
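For context on the surrounding lines: a resource takes over as the critical resource once its scaled count pulls ahead of the current critical resource by at least a full cycle. A toy model of that rule (not LLVM code; the comparison against the latency factor is assumed from the comment above, and the values are illustrative):

    // Toy model of critical-resource tracking; not LLVM code.
    #include <cstdio>
    #include <vector>

    struct ToyZone {
      std::vector<unsigned> ResourceCounts; // scaled units consumed per resource
      unsigned CritResIdx = 0;

      void countResource(unsigned PIdx, unsigned Count, unsigned LatencyFactor) {
        ResourceCounts[PIdx] += Count;
        // Switch the critical resource when PIdx pulls ahead by a full cycle.
        if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx])
            >= (int)LatencyFactor)
          CritResIdx = PIdx;
      }
    };

    int main() {
      ToyZone Zone;
      Zone.ResourceCounts = {6, 5}; // resource 0 currently critical
      Zone.countResource(1, 3, /*LatencyFactor=*/2);
      std::printf("critical resource = %u\n", Zone.CritResIdx); // now 1
      return 0;
    }

After charging three more units to resource 1 it leads resource 0 by two, a full cycle at a latency factor of 2, so it becomes the critical resource.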
@@ -1495,9 +1497,7 @@ SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() {
 /// resources.
 ///
 /// If the CriticalZone is latency limited, don't force a policy for the
-/// candidates here. Instead, When releasing each candidate, releaseNode
-/// compares the region's critical path to the candidate's height or depth and
-/// the scheduled zone's expected latency then sets ShouldIncreaseILP.
+/// candidates here. Instead, setLatencyPolicy sets ReduceLatency if needed.
 void ConvergingScheduler::balanceZones(
   ConvergingScheduler::SchedBoundary &CriticalZone,
   ConvergingScheduler::SchedCandidate &CriticalCand,
@@ -1506,6 +1506,7 @@ void ConvergingScheduler::balanceZones(
   if (!CriticalZone.IsResourceLimited)
     return;
+  assert(SchedModel->hasInstrSchedModel() && "required schedmodel");
   SchedRemainder *Rem = CriticalZone.Rem;
@@ -1513,7 +1514,7 @@ void ConvergingScheduler::balanceZones(
   // remainder, try to reduce it.
   unsigned RemainingCritCount =
     Rem->RemainingCounts[CriticalZone.CritResIdx];
-  if ((int)(Rem->MaxRemainingCount - RemainingCritCount)
+  if ((int)(Rem->getMaxRemainingCount(SchedModel) - RemainingCritCount)
       > (int)SchedModel->getLatencyFactor()) {
     CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx;
     DEBUG(dbgs() << "Balance " << CriticalZone.Available.getName() << " reduce "
@@ -1539,12 +1540,9 @@ void ConvergingScheduler::checkResourceLimits(
   ConvergingScheduler::SchedCandidate &TopCand,
   ConvergingScheduler::SchedCandidate &BotCand) {
-  Bot.checkILPPolicy();
-  Top.checkILPPolicy();
-  if (Bot.ShouldIncreaseILP)
-    BotCand.Policy.ReduceLatency = true;
-  if (Top.ShouldIncreaseILP)
-    TopCand.Policy.ReduceLatency = true;
+  // Set ReduceLatency to true if needed.
+  Bot.setLatencyPolicy(TopCand.Policy);
+  Top.setLatencyPolicy(BotCand.Policy);
   // Handle resource-limited regions.
   if (Top.IsResourceLimited && Bot.IsResourceLimited
@@ -1579,9 +1577,6 @@ void ConvergingScheduler::checkResourceLimits(
   // The critical resource is different in each zone, so request balancing.
   // Compute the cost of each zone.
-  Rem.MaxRemainingCount = std::max(
-    Rem.RemainingMicroOps * SchedModel->getMicroOpFactor(),
-    Rem.RemainingCounts[Rem.CritResIdx]);
   Top.ExpectedCount = std::max(Top.ExpectedLatency, Top.CurrCycle);
   Top.ExpectedCount = std::max(
     Top.getCriticalCount(),