Revert r164061-r164067. Most of the new subtarget emitter.

I have to work out the Target/CodeGen header dependencies
before putting this back.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164072 91177308-0d34-0410-b5e6-96231b3b80d8
Andrew Trick 2012-09-17 23:00:42 +00:00
parent 419e5b9d4f
commit e1b5328717
13 changed files with 64 additions and 356 deletions

View File

@@ -18,7 +18,6 @@
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SmallSet.h"
@@ -182,9 +181,6 @@ namespace llvm {
/// Live Intervals provides reaching defs in preRA scheduling.
LiveIntervals *LIS;
/// TargetSchedModel provides an interface to the machine model.
TargetSchedModel SchedModel;
/// isPostRA flag indicates vregs cannot be present.
bool IsPostRA;

View File

@@ -45,33 +45,17 @@ public:
/// Return true if this machine model includes an instruction-level scheduling
/// model. This is more detailed than the coarse-grained IssueWidth and default
/// latency properties, but separate from the per-cycle itinerary data.
bool hasInstrSchedModel() const { return SchedModel.hasInstrSchedModel(); }
bool hasInstrSchedModel() const {
return SchedModel.hasInstrSchedModel();
}
/// Return true if this machine model includes cycle-to-cycle itinerary
/// data. This models scheduling at each stage in the processor pipeline.
bool hasInstrItineraries() const { return !InstrItins.isEmpty(); }
/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI
/// may be NULL for an unknown user.
///
/// FindMin may be set to get the minimum vs. expected latency. Minimum
/// latency is used for scheduling groups, while expected latency is for
/// instruction cost and critical path.
unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
const MachineInstr *UseMI, unsigned UseOperIdx,
bool FindMin) const;
bool hasInstrItineraries() const {
return SchedModel.hasInstrItineraries();
}
unsigned getProcessorID() const { return SchedModel.getProcessorID(); }
private:
/// getDefLatency is a helper for computeOperandLatency. Return the
/// instruction's latency if operand lookup is not required.
/// Otherwise return -1.
int getDefLatency(const MachineInstr *DefMI, bool FindMin) const;
/// Return the MCSchedClassDesc for this instruction.
const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
};
} // namespace llvm
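
For orientation, a minimal sketch of how a scheduling client would use the computeOperandLatency interface declared in this hunk; the helper function is hypothetical, and only the signature and the FindMin flag come from the header above.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetSchedule.h"

using namespace llvm;

// Hypothetical helper: latency of a def->use edge as seen by the scheduler.
// FindMin=false asks for the expected latency (instruction cost and critical
// path); FindMin=true would instead return the minimum latency used for
// scheduling groups.
static unsigned getEdgeLatency(const TargetSchedModel &SchedModel,
                               const MachineInstr *DefMI, unsigned DefOperIdx,
                               const MachineInstr *UseMI, unsigned UseOperIdx) {
  return SchedModel.computeOperandLatency(DefMI, DefOperIdx, UseMI, UseOperIdx,
                                          /*FindMin=*/false);
}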

View File

@@ -172,8 +172,10 @@ private:
unsigned ProcID;
const MCProcResourceDesc *ProcResourceTable;
const MCSchedClassDesc *SchedClassTable;
#ifndef NDEBUG
unsigned NumProcResourceKinds;
unsigned NumSchedClasses;
#endif
// Instruction itinerary tables used by InstrItineraryData.
friend class InstrItineraryData;
const InstrItinerary *InstrItineraries;
@@ -188,27 +190,26 @@ public:
LoadLatency(DefaultLoadLatency),
HighLatency(DefaultHighLatency),
MispredictPenalty(DefaultMispredictPenalty),
ProcID(0), ProcResourceTable(0), SchedClassTable(0),
NumProcResourceKinds(0), NumSchedClasses(0),
InstrItineraries(0) {
(void)NumProcResourceKinds;
(void)NumSchedClasses;
}
ProcID(0), InstrItineraries(0) {}
// Table-gen driven ctor.
MCSchedModel(unsigned iw, int ml, unsigned ll, unsigned hl, unsigned mp,
unsigned pi, const MCProcResourceDesc *pr,
const MCSchedClassDesc *sc, unsigned npr, unsigned nsc,
const InstrItinerary *ii):
IssueWidth(iw), MinLatency(ml), LoadLatency(ll), HighLatency(hl),
MispredictPenalty(mp), ProcID(pi), ProcResourceTable(pr),
SchedClassTable(sc), NumProcResourceKinds(npr), NumSchedClasses(nsc),
InstrItineraries(ii) {}
MispredictPenalty(mp), ProcID(0), ProcResourceTable(0),
SchedClassTable(0), InstrItineraries(ii) {}
unsigned getProcessorID() const { return ProcID; }
/// Does this machine model include instruction-level scheduling.
bool hasInstrSchedModel() const { return SchedClassTable; }
bool hasInstrSchedModel() const {
return SchedClassTable;
}
/// Does this machine model include cycle-to-cycle itineraries.
bool hasInstrItineraries() const {
return InstrItineraries;
}
const MCProcResourceDesc *getProcResource(unsigned ProcResourceIdx) const {
assert(hasInstrSchedModel() && "No scheduling machine model");

View File

@@ -36,7 +36,6 @@ class MCSubtargetInfo {
const MCWriteProcResEntry *WriteProcResTable;
const MCWriteLatencyEntry *WriteLatencyTable;
const MCReadAdvanceEntry *ReadAdvanceTable;
const MCSchedModel *CPUSchedModel;
const InstrStage *Stages; // Instruction itinerary stages
const unsigned *OperandCycles; // Itinerary operand cycles
@@ -50,9 +49,6 @@ public:
const SubtargetFeatureKV *PF,
const SubtargetFeatureKV *PD,
const SubtargetInfoKV *ProcSched,
const MCWriteProcResEntry *WPR,
const MCWriteLatencyEntry *WL,
const MCReadAdvanceEntry *RA,
const InstrStage *IS,
const unsigned *OC, const unsigned *FP,
unsigned NF, unsigned NP);
@@ -68,9 +64,9 @@ public:
return FeatureBits;
}
/// InitMCProcessorInfo - Set or change the CPU (optionally supplemented with
/// feature string). Recompute feature bits and scheduling model.
void InitMCProcessorInfo(StringRef CPU, StringRef FS);
/// ReInitMCSubtargetInfo - Change CPU (and optionally supplemented with
/// feature string), recompute and return feature bits.
uint64_t ReInitMCSubtargetInfo(StringRef CPU, StringRef FS);
/// ToggleFeature - Toggle a feature and returns the re-computed feature
/// bits. This version does not change the implied bits.
@@ -84,10 +80,6 @@ public:
///
const MCSchedModel *getSchedModelForCPU(StringRef CPU) const;
/// getSchedModel - Get the machine model for this subtarget's CPU.
///
const MCSchedModel *getSchedModel() const { return CPUSchedModel; }
/// Return an iterator at the first process resource consumed by the given
/// scheduling class.
const MCWriteProcResEntry *getWriteProcResBegin(

View File

@@ -824,9 +824,6 @@ public:
unsigned defaultDefLatency(const MCSchedModel *SchedModel,
const MachineInstr *DefMI) const;
int computeDefOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, bool FindMin) const;
/// isHighLatencyDef - Return true if this opcode has high latency to its
/// result.
virtual bool isHighLatencyDef(int opc) const { return false; }
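
As an illustration of the hook above, a hypothetical target override (the target, class, and opcode names are invented); a backend would typically flag long-latency operations such as divides here.

// Sketch of a target-specific override of isHighLatencyDef().
bool FooInstrInfo::isHighLatencyDef(int Opc) const {
  // Division results take many cycles, so schedule their users conservatively.
  return Opc == Foo::SDIV || Opc == Foo::UDIV || Opc == Foo::FDIV;
}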

View File

@@ -14,7 +14,6 @@
#ifndef LLVM_TARGET_TARGETSUBTARGETINFO_H
#define LLVM_TARGET_TARGETSUBTARGETINFO_H
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CodeGen.h"
@@ -44,24 +43,6 @@ public:
virtual ~TargetSubtargetInfo();
/// Initialize a copy of the scheduling model for this subtarget.
/// TargetSchedModel provides the interface for the subtarget's
/// instruction scheduling information.
void initSchedModel(TargetSchedModel &SchedModel,
const TargetInstrInfo *TII) const {
// CPUSchedModel is initialized to a static instance by InitMCSubtargetInfo.
SchedModel.init(*getSchedModel(), this, TII);
}
/// Resolve a SchedClass at runtime, where SchedClass identifies an
/// MCSchedClassDesc with the isVariant property. This may return the ID of
/// another variant SchedClass, but repeated invocation must quickly terminate
/// in a nonvariant SchedClass.
virtual unsigned resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,
const TargetSchedModel* SchedModel) const {
return 0;
}
/// getSpecialAddressLatency - For targets where it is beneficial to
/// backschedule instructions that compute addresses, return a value
/// indicating the number of scheduling cycles of backscheduling that

View File

@@ -606,13 +606,13 @@ getOperandLatency(const InstrItineraryData *ItinData,
/// If we can determine the operand latency from the def only, without itinerary
/// lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
const InstrItineraryData *ItinData,
const MachineInstr *DefMI, bool FindMin) const {
static int computeDefOperandLatency(
const TargetInstrInfo *TII, const InstrItineraryData *ItinData,
const MachineInstr *DefMI, bool FindMin) {
// Let the target hook getInstrLatency handle missing itineraries.
if (!ItinData)
return getInstrLatency(ItinData, DefMI);
return TII->getInstrLatency(ItinData, DefMI);
// Return a latency based on the itinerary properties and defining instruction
// if possible. Some common subtargets don't require per-operand latency,
@@ -621,7 +621,7 @@ int TargetInstrInfo::computeDefOperandLatency(
// If MinLatency is valid, call getInstrLatency. This uses Stage latency if
// it exists before defaulting to MinLatency.
if (ItinData->SchedModel->MinLatency >= 0)
return getInstrLatency(ItinData, DefMI);
return TII->getInstrLatency(ItinData, DefMI);
// If MinLatency is invalid, OperandLatency is interpreted as MinLatency.
// For empty itineraries, short-circuit the check and default to one cycle.
@@ -629,7 +629,7 @@ int TargetInstrInfo::computeDefOperandLatency(
return 1;
}
else if(ItinData->isEmpty())
return defaultDefLatency(ItinData->SchedModel, DefMI);
return TII->defaultDefLatency(ItinData->SchedModel, DefMI);
// ...operand lookup required
return -1;
@@ -652,7 +652,7 @@ computeOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *UseMI, unsigned UseIdx,
bool FindMin) const {
int DefLatency = computeDefOperandLatency(ItinData, DefMI, FindMin);
int DefLatency = computeDefOperandLatency(this, ItinData, DefMI, FindMin);
if (DefLatency >= 0)
return DefLatency;

View File

@@ -14,7 +14,6 @@
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
@@ -23,9 +22,6 @@ using namespace llvm;
static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(false),
cl::desc("Use TargetSchedModel for latency lookup"));
static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
cl::desc("Use InstrItineraryData for latency lookup"));
void TargetSchedModel::init(const MCSchedModel &sm,
const TargetSubtargetInfo *sti,
const TargetInstrInfo *tii) {
@@ -34,139 +30,3 @@ void TargetSchedModel::init(const MCSchedModel &sm,
TII = tii;
STI->initInstrItins(InstrItins);
}
/// If we can determine the operand latency from the def only, without machine
/// model or itinerary lookup, do so. Otherwise return -1.
int TargetSchedModel::getDefLatency(const MachineInstr *DefMI,
bool FindMin) const {
// Return a latency based on the itinerary properties and defining instruction
// if possible. Some common subtargets don't require per-operand latency,
// especially for minimum latencies.
if (FindMin) {
// If MinLatency is invalid, then use the itinerary for MinLatency. If no
// itinerary exists either, then use single cycle latency.
if (SchedModel.MinLatency < 0
&& !(EnableSchedItins && hasInstrItineraries())) {
return 1;
}
return SchedModel.MinLatency;
}
else if (!(EnableSchedModel && hasInstrSchedModel())
&& !(EnableSchedItins && hasInstrItineraries())) {
return TII->defaultDefLatency(&SchedModel, DefMI);
}
// ...operand lookup required
return -1;
}
/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *TargetSchedModel::
resolveSchedClass(const MachineInstr *MI) const {
// Get the definition's scheduling class descriptor from this machine model.
unsigned SchedClass = MI->getDesc().getSchedClass();
const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
#ifndef NDEBUG
unsigned NIter = 0;
#endif
while (SCDesc->isVariant()) {
assert(++NIter < 6 && "Variants are nested deeper than the magic number");
SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
SCDesc = SchedModel.getSchedClassDesc(SchedClass);
}
return SCDesc;
}
/// Find the def index of this operand. This index maps to the machine model and
/// is independent of use operands. Def operands may be reordered with uses or
/// merged with uses without affecting the def index (e.g. before/after
/// regalloc). However, an instruction's def operands must never be reordered
/// with respect to each other.
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
unsigned DefIdx = 0;
for (unsigned i = 0; i != DefOperIdx; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef())
++DefIdx;
}
return DefIdx;
}
/// Find the use index of this operand. This is independent of the instruction's
/// def operands.
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
unsigned UseIdx = 0;
for (unsigned i = 0; i != UseOperIdx; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isUse())
++UseIdx;
}
return UseIdx;
}
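// Worked example (illustrative, not part of this patch): for an instruction
// whose operand list is (def0, def1, use0, use1), findDefIdx(MI, 1) returns 1
// and findUseIdx(MI, 3) returns 1, because each helper counts only operands of
// its own kind that appear earlier in the operand list.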
// Top-level API for clients that know the operand indices.
unsigned TargetSchedModel::computeOperandLatency(
const MachineInstr *DefMI, unsigned DefOperIdx,
const MachineInstr *UseMI, unsigned UseOperIdx,
bool FindMin) const {
int DefLatency = getDefLatency(DefMI, FindMin);
if (DefLatency >= 0)
return DefLatency;
if (!FindMin && EnableSchedModel && hasInstrSchedModel()) {
const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
if (DefIdx < SCDesc->NumWriteLatencyEntries) {
// Lookup the definition's write latency in SubtargetInfo.
const MCWriteLatencyEntry *WLEntry =
STI->getWriteLatencyEntry(SCDesc, DefIdx);
unsigned WriteID = WLEntry->WriteResourceID;
unsigned Latency = WLEntry->Cycles;
if (!UseMI)
return Latency;
// Lookup the use's latency adjustment in SubtargetInfo.
const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
if (UseDesc->NumReadAdvanceEntries == 0)
return Latency;
unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
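// Numeric example (illustrative, not from this patch): if WLEntry->Cycles is 4
// and the use's ReadAdvance for this WriteID is 2, the edge latency returned
// below is 4 - 2 = 2 cycles.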
return Latency - STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
}
// If DefIdx does not exist in the model (e.g. implicit defs), then return
// unit latency (defaultDefLatency may be too conservative).
// TODO: For unknown defs, we may want to use the subtarget's model
// for WAW latency here instead of 1 cycle.
assert((!SCDesc->isValid() || DefMI->getOperand(DefOperIdx).isImplicit()) &&
"DefIdx exceeds machine model def operand list");
return 1;
}
assert(EnableSchedItins && hasInstrItineraries() &&
"operand latency requires itinerary");
int OperLatency = 0;
if (UseMI) {
OperLatency =
TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx, UseMI, UseOperIdx);
}
else {
unsigned DefClass = DefMI->getDesc().getSchedClass();
OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
}
if (OperLatency >= 0)
return OperLatency;
// No operand latency was found.
unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI);
// Expected latency is the max of the stage latency and itinerary props.
if (!FindMin)
InstrLatency = std::max(InstrLatency,
TII->defaultDefLatency(&SchedModel, DefMI));
return InstrLatency;
}

View File

@@ -19,28 +19,11 @@ using namespace llvm;
MCSchedModel MCSchedModel::DefaultSchedModel; // For unknown processors.
/// ReInitMCSubtargetInfo - Set or change the CPU (optionally supplemented
/// with feature string). Recompute feature bits and scheduling model.
void
MCSubtargetInfo::InitMCProcessorInfo(StringRef CPU, StringRef FS) {
SubtargetFeatures Features(FS);
FeatureBits = Features.getFeatureBits(CPU, ProcDesc, NumProcs,
ProcFeatures, NumFeatures);
if (!CPU.empty())
CPUSchedModel = getSchedModelForCPU(CPU);
else
CPUSchedModel = &MCSchedModel::DefaultSchedModel;
}
void
MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
const SubtargetFeatureKV *PF,
const SubtargetFeatureKV *PD,
const SubtargetInfoKV *ProcSched,
const MCWriteProcResEntry *WPR,
const MCWriteLatencyEntry *WL,
const MCReadAdvanceEntry *RA,
const InstrStage *IS,
const unsigned *OC,
const unsigned *FP,
@@ -49,17 +32,25 @@ MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
ProcFeatures = PF;
ProcDesc = PD;
ProcSchedModels = ProcSched;
WriteProcResTable = WPR;
WriteLatencyTable = WL;
ReadAdvanceTable = RA;
Stages = IS;
OperandCycles = OC;
ForwardingPaths = FP;
NumFeatures = NF;
NumProcs = NP;
InitMCProcessorInfo(CPU, FS);
SubtargetFeatures Features(FS);
FeatureBits = Features.getFeatureBits(CPU, ProcDesc, NumProcs,
ProcFeatures, NumFeatures);
}
/// ReInitMCSubtargetInfo - Change CPU (and optionally supplemented with
/// feature string) and recompute feature bits.
uint64_t MCSubtargetInfo::ReInitMCSubtargetInfo(StringRef CPU, StringRef FS) {
SubtargetFeatures Features(FS);
FeatureBits = Features.getFeatureBits(CPU, ProcDesc, NumProcs,
ProcFeatures, NumFeatures);
return FeatureBits;
}
/// ToggleFeature - Toggle a feature and returns the re-computed feature
@@ -114,5 +105,5 @@ MCSubtargetInfo::getInstrItineraryForCPU(StringRef CPU) const {
/// Initialize an InstrItineraryData instance.
void MCSubtargetInfo::initInstrItins(InstrItineraryData &InstrItins) const {
InstrItins =
InstrItineraryData(CPUSchedModel, Stages, OperandCycles, ForwardingPaths);
InstrItineraryData(0, Stages, OperandCycles, ForwardingPaths);
}
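
To show how the restored ReInitMCSubtargetInfo API is consumed, here is a sketch of the ParseSubtargetFeatures body that TableGen emits for a hypothetical target "Foo" (the feature flag and member are invented; the actual output comes from ParseFeaturesFunction in the SubtargetEmitter change further down).

// Hypothetical TableGen-generated code:
void FooSubtarget::ParseSubtargetFeatures(StringRef CPU, StringRef FS) {
  uint64_t Bits = ReInitMCSubtargetInfo(CPU, FS);
  if ((Bits & Foo::FeatureSlowDiv) != 0)
    HasSlowDivide = true;
}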

View File

@@ -13,9 +13,8 @@
#include "ARMSubtarget.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMBaseInstrInfo.h"
#include "llvm/GlobalValue.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#define GET_SUBTARGETINFO_TARGET_DESC

View File

@@ -83,7 +83,7 @@ struct CodeGenSchedRW {
#endif
};
/// Represent a transition between SchedClasses induced by SchedVariant.
/// Represent a transition between SchedClasses induced by SchedWriteVariant.
struct CodeGenSchedTransition {
unsigned ToClassIdx;
IdxVec ProcIndices;
@@ -304,6 +304,15 @@ public:
return SchedClasses[Idx];
}
// Get an itinerary class's index. Value indices are '0' for NoItinerary up to
// and including numItineraryClasses().
unsigned getItinClassIdx(Record *ItinDef) const {
assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass");
unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName());
assert(Idx <= NumItineraryClasses && "bad ItinClass index");
return Idx;
}
// Get the SchedClass index for an instruction. Instructions with no
// itinerary, no SchedReadWrites, and no InstrReadWrites references return 0
// for NoItinerary.

View File

@@ -304,10 +304,11 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
MinOperands = Inst.Operands.back().MIOperandNo +
Inst.Operands.back().MINumOperands;
Record *ItinDef = Inst.TheDef->getValueAsDef("Itinerary");
OS << " { ";
OS << Num << ",\t" << MinOperands << ",\t"
<< Inst.Operands.NumDefs << ",\t"
<< SchedModels.getSchedClassIdx(Inst) << ",\t"
<< SchedModels.getItinClassIdx(ItinDef) << ",\t"
<< Inst.TheDef->getValueAsInt("Size") << ",\t0";
// Emit all of the target independent flags...

View File

@@ -87,7 +87,6 @@ class SubtargetEmitter {
void EmitSchedClassTables(SchedClassTables &SchedTables, raw_ostream &OS);
void EmitProcessorModels(raw_ostream &OS);
void EmitProcessorLookup(raw_ostream &OS);
void EmitSchedModelHelpers(std::string ClassName, raw_ostream &OS);
void EmitSchedModel(raw_ostream &OS);
void ParseFeaturesFunction(raw_ostream &OS, unsigned NumFeatures,
unsigned NumProcs);
@@ -709,7 +708,7 @@ void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
SCE = SchedModels.schedClassEnd(); SCI != SCE; ++SCI) {
SCTab.resize(SCTab.size() + 1);
MCSchedClassDesc &SCDesc = SCTab.back();
// SCDesc.Name is guarded by NDEBUG
SCDesc.Name = SCI->Name.c_str();
SCDesc.NumMicroOps = 0;
SCDesc.BeginGroup = false;
SCDesc.EndGroup = false;
@@ -1020,15 +1019,6 @@ void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) {
EmitProcessorProp(OS, PI->ModelDef, "LoadLatency", ',');
EmitProcessorProp(OS, PI->ModelDef, "HighLatency", ',');
EmitProcessorProp(OS, PI->ModelDef, "MispredictPenalty", ',');
OS << " " << PI->Index << ", // Processor ID\n";
if (PI->hasInstrSchedModel())
OS << " " << PI->ModelName << "ProcResources" << ",\n"
<< " " << PI->ModelName << "SchedClasses" << ",\n"
<< " " << PI->ProcResourceDefs.size()+1 << ",\n"
<< " " << (SchedModels.schedClassEnd()
- SchedModels.schedClassBegin()) << ",\n";
else
OS << " 0, 0, 0, 0, // No instruction-level machine model.\n";
if (SchedModels.hasItineraryClasses())
OS << " " << PI->ItinsDef->getName() << ");\n";
else
@@ -1110,85 +1100,6 @@ void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) {
OS << "#undef DBGFIELD";
}
void SubtargetEmitter::EmitSchedModelHelpers(std::string ClassName,
raw_ostream &OS) {
OS << "unsigned " << ClassName
<< "\n::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,"
<< " const TargetSchedModel *SchedModel) const {\n";
std::vector<Record*> Prologs = Records.getAllDerivedDefinitions("PredicateProlog");
std::sort(Prologs.begin(), Prologs.end(), LessRecord());
for (std::vector<Record*>::const_iterator
PI = Prologs.begin(), PE = Prologs.end(); PI != PE; ++PI) {
OS << (*PI)->getValueAsString("Code") << '\n';
}
IdxVec VariantClasses;
for (CodeGenSchedModels::SchedClassIter SCI = SchedModels.schedClassBegin(),
SCE = SchedModels.schedClassEnd(); SCI != SCE; ++SCI) {
if (SCI->Transitions.empty())
continue;
VariantClasses.push_back(SCI - SchedModels.schedClassBegin());
}
if (!VariantClasses.empty()) {
OS << " switch (SchedClass) {\n";
for (IdxIter VCI = VariantClasses.begin(), VCE = VariantClasses.end();
VCI != VCE; ++VCI) {
const CodeGenSchedClass &SC = SchedModels.getSchedClass(*VCI);
OS << " case " << *VCI << ": // " << SC.Name << '\n';
IdxVec ProcIndices;
for (std::vector<CodeGenSchedTransition>::const_iterator
TI = SC.Transitions.begin(), TE = SC.Transitions.end();
TI != TE; ++TI) {
IdxVec PI;
std::set_union(TI->ProcIndices.begin(), TI->ProcIndices.end(),
ProcIndices.begin(), ProcIndices.end(),
std::back_inserter(PI));
ProcIndices.swap(PI);
}
for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
PI != PE; ++PI) {
OS << " ";
if (*PI != 0)
OS << "if (SchedModel->getProcessorID() == " << *PI << ") ";
OS << "{ // " << (SchedModels.procModelBegin() + *PI)->ModelName
<< '\n';
for (std::vector<CodeGenSchedTransition>::const_iterator
TI = SC.Transitions.begin(), TE = SC.Transitions.end();
TI != TE; ++TI) {
OS << " if (";
if (*PI != 0 && !std::count(TI->ProcIndices.begin(),
TI->ProcIndices.end(), *PI)) {
continue;
}
for (RecIter RI = TI->PredTerm.begin(), RE = TI->PredTerm.end();
RI != RE; ++RI) {
if (RI != TI->PredTerm.begin())
OS << "\n && ";
OS << "(" << (*RI)->getValueAsString("Predicate") << ")";
}
OS << ")\n"
<< " return " << TI->ToClassIdx << "; // "
<< SchedModels.getSchedClass(TI->ToClassIdx).Name << '\n';
}
OS << " }\n";
if (*PI == 0)
break;
}
unsigned SCIdx = 0;
if (SC.ItinClassDef)
SCIdx = SchedModels.getSchedClassIdxForItin(SC.ItinClassDef);
else
SCIdx = SchedModels.findSchedClassIdx(SC.Writes, SC.Reads);
if (SCIdx != *VCI)
OS << " return " << SCIdx << ";\n";
OS << " break;\n";
}
OS << " };\n";
}
OS << " report_fatal_error(\"Expected a variant SchedClass\");\n"
<< "} // " << ClassName << "::resolveSchedClass\n";
}
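
For reference, the shape of the resolveSchedClass code that this (now removed) helper emitted; the class name, indices, processor ID, model name, and predicate below are all invented.

// Hypothetical emitter output:
unsigned FooGenSubtargetInfo
::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI, const TargetSchedModel *SchedModel) const {
  switch (SchedClass) {
  case 12: // WriteALUVariant
    if (SchedModel->getProcessorID() == 2) { // FooCoreModel
      if ((MI->getNumOperands() == 3))
        return 13; // WriteALU
    }
    return 14;
    break;
  };
  report_fatal_error("Expected a variant SchedClass");
} // FooGenSubtargetInfo::resolveSchedClass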
//
// ParseFeaturesFunction - Produces a subtarget specific function for parsing
// the subtarget features string.
@@ -1213,8 +1124,7 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS,
return;
}
OS << " InitMCProcessorInfo(CPU, FS);\n"
<< " uint64_t Bits = getFeatureBits();\n";
OS << " uint64_t Bits = ReInitMCSubtargetInfo(CPU, FS);\n";
for (unsigned i = 0; i < Features.size(); i++) {
// Next record
@@ -1282,17 +1192,13 @@ void SubtargetEmitter::run(raw_ostream &OS) {
else
OS << "0, ";
OS << '\n'; OS.indent(22);
OS << Target << "ProcSchedKV, "
<< Target << "WriteProcResTable, "
<< Target << "WriteLatencyTable, "
<< Target << "ReadAdvanceTable, ";
if (SchedModels.hasItineraryClasses()) {
OS << '\n'; OS.indent(22);
OS << Target << "Stages, "
OS << Target << "ProcSchedKV, "
<< Target << "Stages, "
<< Target << "OperandCycles, "
<< Target << "ForwardingPaths, ";
} else
OS << "0, 0, 0, ";
OS << "0, 0, 0, 0, ";
OS << NumFeatures << ", " << NumProcs << ");\n}\n\n";
OS << "} // End llvm namespace \n";
@@ -1319,8 +1225,6 @@ void SubtargetEmitter::run(raw_ostream &OS) {
<< " explicit " << ClassName << "(StringRef TT, StringRef CPU, "
<< "StringRef FS);\n"
<< "public:\n"
<< " unsigned resolveSchedClass(unsigned SchedClass, const MachineInstr *DefMI,"
<< " const TargetSchedModel *SchedModel) const;\n"
<< " DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)"
<< " const;\n"
<< "};\n";
@@ -1360,22 +1264,15 @@ void SubtargetEmitter::run(raw_ostream &OS) {
OS << Target << "SubTypeKV, ";
else
OS << "0, ";
OS << '\n'; OS.indent(22);
OS << Target << "ProcSchedKV, "
<< Target << "WriteProcResTable, "
<< Target << "WriteLatencyTable, "
<< Target << "ReadAdvanceTable, ";
OS << '\n'; OS.indent(22);
if (SchedModels.hasItineraryClasses()) {
OS << Target << "Stages, "
OS << Target << "ProcSchedKV, "
<< Target << "Stages, "
<< Target << "OperandCycles, "
<< Target << "ForwardingPaths, ";
} else
OS << "0, 0, 0, ";
OS << "0, 0, 0, 0, ";
OS << NumFeatures << ", " << NumProcs << ");\n}\n\n";
EmitSchedModelHelpers(ClassName, OS);
OS << "} // End llvm namespace \n";
OS << "#endif // GET_SUBTARGETINFO_CTOR\n\n";