misched: Added MultiIssueItineraries.
This allows a subtarget to explicitly specify the issue width and other properties without providing pipeline stage details for every instruction.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157979 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 4eb4e5eb22
commit fc992996f7
@@ -103,27 +103,86 @@ struct InstrItinerary {
 };
 
+//===----------------------------------------------------------------------===//
+/// Instruction itinerary properties - These properties provide general
+/// information about the microarchitecture to the scheduler.
+///
+struct InstrItineraryProps {
+  // IssueWidth is the maximum number of instructions that may be scheduled in
+  // the same per-cycle group.
+  unsigned IssueWidth;
+
+  // MinLatency is the minimum latency between a register write
+  // followed by a data dependent read. This determines which
+  // instructions may be scheduled in the same per-cycle group. This
+  // is distinct from *expected* latency, which determines the likely
+  // critical path but does not guarantee a pipeline
+  // hazard. MinLatency can always be overridden by the number of
+  // InstrStage cycles.
+  //
+  // (-1) Standard in-order processor.
+  //      Use InstrItinerary OperandCycles as MinLatency.
+  //      If no OperandCycles exist, then use the cycle of the last InstrStage.
+  //
+  // (0) Out-of-order processor, or in-order with bundled dependencies.
+  //     RAW dependencies may be dispatched in the same cycle.
+  //     Optional InstrItinerary OperandCycles provides expected latency.
+  //
+  // (>0) In-order processor with variable latencies.
+  //      Use the greater of this value or the cycle of the last InstrStage.
+  //      Optional InstrItinerary OperandCycles provides expected latency.
+  // TODO: can't yet specify both min and expected latency per operand.
+  int MinLatency;
+
+  // LoadLatency is the expected latency of load instructions.
+  //
+  // If MinLatency >= 0, this may be overridden for individual load opcodes by
+  // InstrItinerary OperandCycles.
+  unsigned LoadLatency;
+
+  // HighLatency is the expected latency of "very high latency" operations.
+  // See TargetInstrInfo::isHighLatencyDef().
+  // By default, this is set to an arbitrarily high number of cycles
+  // likely to have some impact on scheduling heuristics.
+  // If MinLatency >= 0, this may be overridden by InstrItinData OperandCycles.
+  unsigned HighLatency;
+
+  InstrItineraryProps(): IssueWidth(1), MinLatency(-1), LoadLatency(4),
+                         HighLatency(10) {}
+
+  InstrItineraryProps(unsigned iw, int ml, unsigned ll, unsigned hl):
+    IssueWidth(iw), MinLatency(ml), LoadLatency(ll), HighLatency(hl) {}
+};
+
+//===----------------------------------------------------------------------===//
+/// Encapsulate all subtarget specific information for scheduling for use with
+/// SubtargetInfoKV.
+struct InstrItinerarySubtargetValue {
+  const InstrItineraryProps *Props;
+  const InstrItinerary *Itineraries;
+};
+
 //===----------------------------------------------------------------------===//
 /// Instruction itinerary Data - Itinerary data supplied by a subtarget to be
 /// used by a target.
 ///
 class InstrItineraryData {
 public:
+  InstrItineraryProps Props;
   const InstrStage     *Stages;         ///< Array of stages selected
   const unsigned       *OperandCycles;  ///< Array of operand cycles selected
   const unsigned       *Forwardings;    ///< Array of pipeline forwarding pathes
   const InstrItinerary *Itineraries;    ///< Array of itineraries selected
-  unsigned              IssueWidth;     ///< Max issue per cycle. 0=Unknown.
 
   /// Ctors.
   ///
   InstrItineraryData() : Stages(0), OperandCycles(0), Forwardings(0),
-                         Itineraries(0), IssueWidth(0) {}
+                         Itineraries(0) {}
 
-  InstrItineraryData(const InstrStage *S, const unsigned *OS,
-                     const unsigned *F, const InstrItinerary *I)
-    : Stages(S), OperandCycles(OS), Forwardings(F), Itineraries(I),
-      IssueWidth(0) {}
+  InstrItineraryData(const InstrItineraryProps *P, const InstrStage *S,
+                     const unsigned *OS, const unsigned *F,
+                     const InstrItinerary *I)
+    : Props(*P), Stages(S), OperandCycles(OS), Forwardings(F), Itineraries(I) {}
 
   /// isEmpty - Returns true if there are no itineraries.
   ///
@@ -160,7 +219,7 @@ public:
   // non-zero default value for all instructions. Some target's provide a
   // dummy (Generic) itinerary which should be handled as if it's itinerary is
   // empty. We identify this by looking for a reference to stage zero (invalid
-  // stage). This is different from beginStage == endState != 0, which could
+  // stage). This is different from beginStage == endStage != 0, which could
   // be used for zero-latency pseudo ops.
   if (isEmpty() || Itineraries[ItinClassIndx].FirstStage == 0)
     return 1;
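For illustration, here is a minimal standalone C++ sketch of how a consumer of these properties might resolve the MinLatency convention described in the comment above. It is not code from this patch; the struct and the helper are local stand-ins for the real LLVM types.

    // Illustrative sketch only -- a local stand-in for InstrItineraryProps.
    #include <algorithm>
    #include <cstdio>

    struct ItinProps {
      unsigned IssueWidth;
      int MinLatency;
      unsigned LoadLatency;
      unsigned HighLatency;
      ItinProps() : IssueWidth(1), MinLatency(-1), LoadLatency(4), HighLatency(10) {}
    };

    // ItinCycles stands for the per-operand cycles (or the last InstrStage cycle)
    // taken from the itinerary, as the comment block above describes.
    static int resolveMinLatency(const ItinProps &P, int ItinCycles) {
      if (P.MinLatency < 0)
        return ItinCycles;                         // (-1): in-order, use the itinerary data
      if (P.MinLatency == 0)
        return 0;                                  // (0): RAW deps may issue in the same cycle
      return std::max(P.MinLatency, ItinCycles);   // (>0): take the larger value
    }

    int main() {
      ItinProps Default;   // IssueWidth=1, MinLatency=-1, LoadLatency=4, HighLatency=10
      std::printf("min latency = %d\n", resolveMinLatency(Default, 3)); // prints 3
      return 0;
    }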
@@ -117,9 +117,16 @@ class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
 // Processor itineraries - These values represent the set of all itinerary
 // classes for a given chip set.
 //
+// Set property values to -1 to use the default.
+// See InstrItineraryProps for comments and defaults.
 class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
                            list<InstrItinData> iid> {
-  int IssueWidth = 1;
+  int IssueWidth = -1;  // Max instructions that may be scheduled per cycle.
+  int MinLatency = -1;  // Determines which instructions are allowed in a group.
+                        // (-1) inorder (0) ooo, (1): inorder +var latencies.
+  int LoadLatency = -1; // Cycles for loads to access the cache.
+  int HighLatency = -1; // Approximation of cycles for "high latency" ops.
+
   list<FuncUnit> FU = fu;
   list<Bypass> BP = bp;
   list<InstrItinData> IID = iid;
@@ -129,3 +136,15 @@ class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
 // info.
 def NoItineraries : ProcessorItineraries<[], [], []>;
 
+// Processor itineraries with non-unit issue width. This allows issue
+// width to be explicitly specified at the beginning of the itinerary.
+class MultiIssueItineraries<int issuewidth, int minlatency,
+                            int loadlatency, int highlatency,
+                            list<FuncUnit> fu, list<Bypass> bp,
+                            list<InstrItinData> iid>
+  : ProcessorItineraries<fu, bp, iid> {
+  let IssueWidth = issuewidth;
+  let MinLatency = minlatency;
+  let LoadLatency = loadlatency;
+  let HighLatency = highlatency;
+}
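For illustration, the four MultiIssueItineraries parameters are meant to line up with the InstrItineraryProps(iw, ml, ll, hl) constructor added in the header above. A standalone C++ sketch of that correspondence follows; the numeric values are hypothetical, not taken from any real target.

    // Illustrative only: local stand-in type, hypothetical example values.
    #include <cassert>

    struct ItinProps {
      unsigned IssueWidth;
      int MinLatency;
      unsigned LoadLatency;
      unsigned HighLatency;
      ItinProps(unsigned iw, int ml, unsigned ll, unsigned hl)
          : IssueWidth(iw), MinLatency(ml), LoadLatency(ll), HighLatency(hl) {}
    };

    int main() {
      // Roughly what a def using MultiIssueItineraries<4, 0, 3, 10, ...> conveys:
      ItinProps Hypothetical(/*issuewidth=*/4, /*minlatency=*/0,
                             /*loadlatency=*/3, /*highlatency=*/10);
      assert(Hypothetical.IssueWidth == 4 && Hypothetical.MinLatency == 0);
      return 0;
    }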
@@ -44,8 +44,6 @@ ScoreboardHazardRecognizer(const InstrItineraryData *II,
   // avoid dealing with the boundary condition.
   unsigned ScoreboardDepth = 1;
   if (ItinData && !ItinData->isEmpty()) {
-    IssueWidth = ItinData->IssueWidth;
-
     for (unsigned idx = 0; ; ++idx) {
       if (ItinData->isEndMarker(idx))
         break;
@@ -74,11 +72,13 @@ ScoreboardHazardRecognizer(const InstrItineraryData *II,
   ReservedScoreboard.reset(ScoreboardDepth);
   RequiredScoreboard.reset(ScoreboardDepth);
 
-  if (!MaxLookAhead)
+  if (!isEnabled())
     DEBUG(dbgs() << "Disabled scoreboard hazard recognizer\n");
-  else
+  else {
+    IssueWidth = ItinData->Props.IssueWidth;
     DEBUG(dbgs() << "Using scoreboard hazard recognizer: Depth = "
           << ScoreboardDepth << '\n');
+  }
 }
 
 void ScoreboardHazardRecognizer::Reset() {
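As a rough, standalone illustration of what the recognizer does with the configured width: instructions are grouped per cycle and a new cycle starts once IssueWidth is reached. This is a simplification for explanation only, not the actual scoreboard logic.

    // Simplified illustration of issue-width-limited per-cycle grouping.
    #include <cstdio>

    struct IssueTracker {
      unsigned IssueWidth;        // e.g. taken from ItinData->Props.IssueWidth
      unsigned IssuedThisCycle;
      unsigned Cycle;

      explicit IssueTracker(unsigned W)
          : IssueWidth(W), IssuedThisCycle(0), Cycle(0) {}

      void issue() {
        if (IssuedThisCycle >= IssueWidth) {  // the per-cycle group is full
          ++Cycle;                            // advance to the next cycle
          IssuedThisCycle = 0;
        }
        ++IssuedThisCycle;
      }
    };

    int main() {
      IssueTracker T(/*IssueWidth=*/2);
      for (int i = 0; i < 5; ++i)
        T.issue();                                   // 5 instructions at width 2
      std::printf("advanced %u cycles\n", T.Cycle);  // prints 2
      return 0;
    }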
@@ -318,7 +318,7 @@ void ResourcePriorityQueue::reserveResources(SUnit *SU) {
 
   // If packet is now full, reset the state so in the next cycle
   // we start fresh.
-  if (Packet.size() >= InstrItins->IssueWidth) {
+  if (Packet.size() >= InstrItins->Props.IssueWidth) {
     ResourcesModel->clearResources();
     Packet.clear();
   }
@@ -91,6 +91,8 @@ MCSubtargetInfo::getInstrItineraryForCPU(StringRef CPU) const {
     return InstrItineraryData();
   }
 
-  return InstrItineraryData(Stages, OperandCycles, ForwardingPathes,
-                            (InstrItinerary *)Found->Value);
+  InstrItinerarySubtargetValue *V =
+    (InstrItinerarySubtargetValue *)Found->Value;
+  return InstrItineraryData(V->Props, Stages, OperandCycles, ForwardingPathes,
+                            V->Itineraries);
 }
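A standalone sketch of the new lookup shape: the per-CPU entry now bundles the processor's properties with its itinerary entries, and both feed the returned itinerary data. A std::map stands in for the real SubtargetInfoKV search; all type names and values below are hypothetical stand-ins, not the MCSubtargetInfo implementation.

    // Illustrative stand-ins only.
    #include <cstdio>
    #include <map>
    #include <string>

    struct ItinProps { unsigned IssueWidth; int MinLatency; };
    struct ItinEntry { unsigned NumMicroOps; };   // stand-in for InstrItinerary
    struct ItinSubtargetValue {                   // stand-in for InstrItinerarySubtargetValue
      const ItinProps *Props;
      const ItinEntry *Itineraries;
    };
    struct ItinData {                             // stand-in for InstrItineraryData
      ItinProps Props;
      const ItinEntry *Itineraries;
    };

    static const ItinProps FooProps = {4, 0};
    static const ItinEntry FooEntries[] = {{1}};
    static const ItinSubtargetValue FooItins = {&FooProps, FooEntries};

    static ItinData getItinerariesForCPU(const std::string &CPU) {
      static const std::map<std::string, const ItinSubtargetValue *> Table = {
          {"foo-cpu", &FooItins}};
      auto It = Table.find(CPU);
      if (It == Table.end())
        return ItinData{ItinProps{1, -1}, nullptr};   // defaults for unknown CPUs
      return ItinData{*It->second->Props, It->second->Itineraries};
    }

    int main() {
      ItinData D = getItinerariesForCPU("foo-cpu");
      std::printf("issue width = %u\n", D.Props.IssueWidth);  // prints 4
      return 0;
    }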
@@ -200,13 +200,14 @@ void ARMSubtarget::computeIssueWidth() {
     const InstrStage *IS = InstrItins.Stages + itin->FirstStage;
     allStage1Units |= IS->getUnits();
   }
-  InstrItins.IssueWidth = 0;
+  InstrItins.Props.IssueWidth = 0;
   while (allStage1Units) {
-    ++InstrItins.IssueWidth;
+    ++InstrItins.Props.IssueWidth;
     // clear the lowest bit
     allStage1Units ^= allStage1Units & ~(allStage1Units - 1);
   }
-  assert(InstrItins.IssueWidth <= 2 && "itinerary bug, too many stage 1 units");
+  assert(InstrItins.Props.IssueWidth <= 2 &&
+         "itinerary bug, too many stage 1 units");
 }
 
 bool ARMSubtarget::enablePostRAScheduler(
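The loop above is just a population count of the stage-1 functional-unit mask: the expression allStage1Units & ~(allStage1Units - 1) isolates the lowest set bit and the XOR clears it. A standalone illustration of the same idiom, with hypothetical input:

    // Each iteration clears the lowest set bit, so the trip count equals the
    // number of stage-1 units in the mask (the derived issue width).
    #include <cstdio>

    static unsigned countStage1Units(unsigned Mask) {
      unsigned Width = 0;
      while (Mask) {
        ++Width;
        Mask ^= Mask & ~(Mask - 1);   // clear the lowest set bit
      }
      return Width;
    }

    int main() {
      std::printf("%u\n", countStage1Units(0x3));  // two stage-1 units -> prints 2
      return 0;
    }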
@@ -41,7 +41,10 @@ def HexagonItineraries :
     InstrItinData<SYS    , [InstrStage<1, [LSUNIT]>]>,
     InstrItinData<MARKER , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
     InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
-  ]>;
+  ]> {
+  // Max issue per cycle == bundle width.
+  let IssueWidth = 4;
+}
 
 //===----------------------------------------------------------------------===//
 // V4 Machine Info +
@@ -52,7 +52,11 @@ def HexagonItinerariesV4 :
     InstrItinData<MARKER , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
     InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
     InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>
-  ]>;
+  ]> {
+  // Max issue per cycle == bundle width.
+  let IssueWidth = 4;
+}
+
 
 //===----------------------------------------------------------------------===//
 // Hexagon V4 Resource Definitions -
@@ -61,9 +61,6 @@ HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS):
   // Initialize scheduling itinerary for the specified CPU.
   InstrItins = getInstrItineraryForCPU(CPUString);
 
-  // Max issue per cycle == bundle width.
-  InstrItins.IssueWidth = 4;
-
   if (EnableMemOps)
     UseMemOps = true;
   else
@@ -43,13 +43,6 @@ MBlazeSubtarget::MBlazeSubtarget(const std::string &TT,
 
   // Initialize scheduling itinerary for the specified CPU.
   InstrItins = getInstrItineraryForCPU(CPUName);
-
-  // Compute the issue width of the MBlaze itineraries
-  computeIssueWidth();
 }
 
-void MBlazeSubtarget::computeIssueWidth() {
-  InstrItins.IssueWidth = 1;
-}
-
 bool MBlazeSubtarget::
@@ -478,6 +478,17 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
   OS << BypassTable;
 }
 
+void SubtargetEmitter::EmitItineraryProp(raw_ostream &OS, const Record *R,
+                                         const char *Name, char Separator) {
+  OS << "  ";
+  int V = R->getValueAsInt(Name);
+  if (V >= 0)
+    OS << V << Separator << " // " << Name;
+  else
+    OS << "DefaultItineraryProps." << Name << Separator;
+  OS << '\n';
+}
+
 //
 // EmitProcessorData - Generate data for processor itineraries.
 //
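A standalone mimic of what the new EmitItineraryProp helper writes: a property given a non-negative value in the .td file is emitted literally (with the property name as a trailing comment), while a negative value falls back to the shared DefaultItineraryProps object. TableGen's Record API is not used here; the record value is faked with a plain int.

    // Mimics the helper's output format only.
    #include <iostream>
    #include <string>

    static void emitItineraryProp(std::ostream &OS, int V, const std::string &Name,
                                  char Separator) {
      OS << "  ";
      if (V >= 0)
        OS << V << Separator << " // " << Name;
      else
        OS << "DefaultItineraryProps." << Name << Separator;
      OS << '\n';
    }

    int main() {
      emitItineraryProp(std::cout, 4, "IssueWidth", ',');    // set explicitly in the .td
      emitItineraryProp(std::cout, -1, "LoadLatency", ',');  // left at the default
      // Expected output:
      //   4, // IssueWidth
      //   DefaultItineraryProps.LoadLatency,
      return 0;
    }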
@@ -485,6 +496,8 @@ void SubtargetEmitter::
 EmitProcessorData(raw_ostream &OS,
                   std::vector<Record*> &ItinClassList,
                   std::vector<std::vector<InstrItinerary> > &ProcList) {
+  OS << "static const llvm::InstrItineraryProps " << "DefaultItineraryProps;";
+
   // Get an iterator for processor itinerary stages
   std::vector<std::vector<InstrItinerary> >::iterator
       ProcListIter = ProcList.begin();
@@ -502,9 +515,19 @@ EmitProcessorData(raw_ostream &OS,
     // Skip default
     if (Name == "NoItineraries") continue;
 
+    // Begin processor itinerary properties
+    OS << "\n";
+    OS << "static const llvm::InstrItineraryProps " << Name << "Props(\n";
+    EmitItineraryProp(OS, Itin, "IssueWidth", ',');
+    EmitItineraryProp(OS, Itin, "MinLatency", ',');
+    EmitItineraryProp(OS, Itin, "LoadLatency", ',');
+    EmitItineraryProp(OS, Itin, "HighLatency", ' ');
+    OS << ");\n";
+
     // Begin processor itinerary table
     OS << "\n";
-    OS << "static const llvm::InstrItinerary " << Name << "[] = {\n";
+    OS << "static const llvm::InstrItinerary " << Name << "Entries"
+       << "[] = {\n";
 
     // For each itinerary class
     std::vector<InstrItinerary> &ItinList = *ProcListIter++;
@@ -531,6 +554,13 @@ EmitProcessorData(raw_ostream &OS,
     // End processor itinerary table
     OS << "  { 1, ~0U, ~0U, ~0U, ~0U } // end marker\n";
     OS << "};\n";
+
+    OS << '\n';
+    OS << "static const llvm::InstrItinerarySubtargetValue "
+       << Name << " = {\n";
+    OS << "  &" << Name << "Props,\n";
+    OS << "  " << Name << "Entries\n";
+    OS << "};\n";
   }
 }
 
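Putting the emitter changes together, the per-processor output is expected to look roughly like the block below (hypothetical processor name Foo, illustrative values). Local stand-in types replace the real MCInstrItineraries.h declarations so the snippet is self-contained; it is an approximation of the generated data, not actual TableGen output.

    // Approximate shape of the generated per-processor data.
    struct InstrItineraryProps {
      unsigned IssueWidth; int MinLatency; unsigned LoadLatency; unsigned HighLatency;
      InstrItineraryProps()
          : IssueWidth(1), MinLatency(-1), LoadLatency(4), HighLatency(10) {}
      InstrItineraryProps(unsigned iw, int ml, unsigned ll, unsigned hl)
          : IssueWidth(iw), MinLatency(ml), LoadLatency(ll), HighLatency(hl) {}
    };
    struct InstrItinerary {
      unsigned NumMicroOps, FirstStage, LastStage, FirstOperandCycle, LastOperandCycle;
    };
    struct InstrItinerarySubtargetValue {
      const InstrItineraryProps *Props;
      const InstrItinerary *Itineraries;
    };

    static const InstrItineraryProps DefaultItineraryProps;

    // ---- emitter-style output for a hypothetical FooItineraries def ----
    static const InstrItineraryProps FooItinerariesProps(
      4,                                 // IssueWidth (set explicitly in the .td)
      DefaultItineraryProps.MinLatency,  // properties left at -1 fall back to the defaults
      DefaultItineraryProps.LoadLatency,
      DefaultItineraryProps.HighLatency);

    static const InstrItinerary FooItinerariesEntries[] = {
      // ...one entry per itinerary class would precede the end marker...
      { 1, ~0U, ~0U, ~0U, ~0U } // end marker
    };

    static const InstrItinerarySubtargetValue FooItineraries = {
      &FooItinerariesProps,
      FooItinerariesEntries
    };

    int main() { return 0; } // data-only example; nothing to execute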
@@ -47,6 +47,8 @@ class SubtargetEmitter : public TableGenBackend {
                           std::map<std::string, unsigned> &ItinClassesMap,
                           std::vector<Record*> &ItinClassList,
                           std::vector<std::vector<InstrItinerary> > &ProcList);
+  void EmitItineraryProp(raw_ostream &OS, const Record *R, const char *Name,
+                         char Separator);
   void EmitProcessorData(raw_ostream &OS,
                          std::vector<Record*> &ItinClassList,
                          std::vector<std::vector<InstrItinerary> > &ProcList);