llvm-6502/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
Evan Cheng 9e23336d0c Experimental scheduler change to schedule / coalesce the copies added for function liveins. Taking 2008-03-10-RegAllocInfLoop.ll as an example, the schedule looks like this after these copies are inserted:
entry: 0x12049d0, LLVM BB @0x1201fd0, ID#0:
Live Ins: %EAX %EDX %ECX
        %reg1031<def> = MOVPC32r 0
        %reg1032<def> = ADD32ri %reg1031, <es:_GLOBAL_OFFSET_TABLE_>, %EFLAGS<imp-def>
        %reg1028<def> = MOV32rr %EAX
        %reg1029<def> = MOV32rr %EDX
        %reg1030<def> = MOV32rr %ECX
        %reg1027<def> = MOV8rm %reg0, 1, %reg0, 0, Mem:LD(1,1) [0x1201910 + 0]
        %reg1025<def> = MOV32rr %reg1029
        %reg1026<def> = MOV32rr %reg1030
        %reg1024<def> = MOV32rr %reg1028

The copies unnecessarily increase register pressure, and this ends up requiring a physical register to be spilled.

With -schedule-livein-copies:
entry: 0x12049d0, LLVM BB @0x1201fa0, ID#0:
Live Ins: %EAX %EDX %ECX
        %reg1031<def> = MOVPC32r 0
        %reg1032<def> = ADD32ri %reg1031, <es:_GLOBAL_OFFSET_TABLE_>, %EFLAGS<imp-def>
        %reg1024<def> = MOV32rr %EAX
        %reg1025<def> = MOV32rr %EDX
        %reg1026<def> = MOV32rr %ECX
        %reg1027<def> = MOV8rm %reg0, 1, %reg0, 0, Mem:LD(1,1) [0x12018e0 + 0]

Much better!


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@48307 91177308-0d34-0410-b5e6-96231b3b80d8
2008-03-12 22:19:41 +00:00

//===---- ScheduleDAG.cpp - Implement the ScheduleDAG class ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG class, which is the common base class used
// by the SelectionDAG-based instruction schedulers. It provides the machinery
// for building scheduling units and emitting the scheduled machine code.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/Constants.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
STATISTIC(NumCommutes, "Number of instructions commuted");
namespace {
static cl::opt<bool>
SchedLiveInCopies("schedule-livein-copies",
cl::desc("Schedule copies of livein registers"),
cl::init(false));
}
ScheduleDAG::ScheduleDAG(SelectionDAG &dag, MachineBasicBlock *bb,
const TargetMachine &tm)
: DAG(dag), BB(bb), TM(tm), MRI(BB->getParent()->getRegInfo()) {
TII = TM.getInstrInfo();
MF = &DAG.getMachineFunction();
TRI = TM.getRegisterInfo();
ConstPool = BB->getParent()->getConstantPool();
}
/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *Use, unsigned Op,
const TargetRegisterInfo *TRI,
const TargetInstrInfo *TII,
unsigned &PhysReg, int &Cost) {
if (Op != 2 || Use->getOpcode() != ISD::CopyToReg)
return;
unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg))
return;
unsigned ResNo = Use->getOperand(2).ResNo;
if (Def->isTargetOpcode()) {
const TargetInstrDesc &II = TII->get(Def->getTargetOpcode());
if (ResNo >= II.getNumDefs() &&
II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
PhysReg = Reg;
const TargetRegisterClass *RC =
TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
Cost = RC->getCopyCost();
}
}
}
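/// Clone - Create a new SUnit for the same node as Old. The clone shares the
/// node and flagged nodes of the original; its InstanceNo records which
/// duplicate of the node it represents.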
SUnit *ScheduleDAG::Clone(SUnit *Old) {
SUnit *SU = NewSUnit(Old->Node);
SU->FlaggedNodes = Old->FlaggedNodes;
SU->InstanceNo = SUnitMap[Old->Node].size();
SU->Latency = Old->Latency;
SU->isTwoAddress = Old->isTwoAddress;
SU->isCommutable = Old->isCommutable;
SU->hasPhysRegDefs = Old->hasPhysRegDefs;
SUnitMap[Old->Node].push_back(SU);
return SU;
}
/// BuildSchedUnits - Build SUnits from the SelectionDAG that we are given as
/// input. This SUnit graph is similar to the SelectionDAG, but represents
/// nodes flagged together with a single SUnit.
void ScheduleDAG::BuildSchedUnits() {
// Reserve entries in the vector for each of the SUnits we are creating. This
// ensures that reallocation of the vector won't happen, so SUnit*'s won't get
// invalidated.
SUnits.reserve(std::distance(DAG.allnodes_begin(), DAG.allnodes_end()));
for (SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin(),
E = DAG.allnodes_end(); NI != E; ++NI) {
if (isPassiveNode(NI)) // Leaf node, e.g. a TargetImmediate.
continue;
// If this node has already been processed, stop now.
if (SUnitMap[NI].size()) continue;
SUnit *NodeSUnit = NewSUnit(NI);
// See if anything is flagged to this node, if so, add them to flagged
// nodes. Nodes can have at most one flag input and one flag output. Flags
// are required to be the last operand and result of a node.
// Scan up, adding flagged preds to FlaggedNodes.
SDNode *N = NI;
if (N->getNumOperands() &&
N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
do {
N = N->getOperand(N->getNumOperands()-1).Val;
NodeSUnit->FlaggedNodes.push_back(N);
SUnitMap[N].push_back(NodeSUnit);
} while (N->getNumOperands() &&
N->getOperand(N->getNumOperands()-1).getValueType()== MVT::Flag);
std::reverse(NodeSUnit->FlaggedNodes.begin(),
NodeSUnit->FlaggedNodes.end());
}
// Scan down, adding this node and any flagged succs to FlaggedNodes if they
// have a user of the flag operand.
N = NI;
while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
SDOperand FlagVal(N, N->getNumValues()-1);
// There are either zero or one users of the Flag result.
bool HasFlagUse = false;
for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
UI != E; ++UI)
if (FlagVal.isOperandOf(*UI)) {
HasFlagUse = true;
NodeSUnit->FlaggedNodes.push_back(N);
SUnitMap[N].push_back(NodeSUnit);
N = *UI;
break;
}
if (!HasFlagUse) break;
}
// Now all flagged nodes are in FlaggedNodes and N is the bottom-most node.
// Update the SUnit
NodeSUnit->Node = N;
SUnitMap[N].push_back(NodeSUnit);
ComputeLatency(NodeSUnit);
}
// Pass 2: add the preds, succs, etc.
for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
SUnit *SU = &SUnits[su];
SDNode *MainNode = SU->Node;
if (MainNode->isTargetOpcode()) {
unsigned Opc = MainNode->getTargetOpcode();
const TargetInstrDesc &TID = TII->get(Opc);
for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
SU->isTwoAddress = true;
break;
}
}
if (TID.isCommutable())
SU->isCommutable = true;
}
// Find all predecessors and successors of the group.
// Temporarily add N to make code simpler.
SU->FlaggedNodes.push_back(MainNode);
for (unsigned n = 0, e = SU->FlaggedNodes.size(); n != e; ++n) {
SDNode *N = SU->FlaggedNodes[n];
if (N->isTargetOpcode() &&
TII->get(N->getTargetOpcode()).getImplicitDefs() &&
CountResults(N) > TII->get(N->getTargetOpcode()).getNumDefs())
SU->hasPhysRegDefs = true;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDNode *OpN = N->getOperand(i).Val;
if (isPassiveNode(OpN)) continue; // Not scheduled.
SUnit *OpSU = SUnitMap[OpN].front();
assert(OpSU && "Node has no SUnit!");
if (OpSU == SU) continue; // In the same group.
MVT::ValueType OpVT = N->getOperand(i).getValueType();
assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
bool isChain = OpVT == MVT::Other;
unsigned PhysReg = 0;
int Cost = 1;
// Determine if this is a physical register dependency.
CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
SU->addPred(OpSU, isChain, false, PhysReg, Cost);
}
}
// Remove MainNode from FlaggedNodes again.
SU->FlaggedNodes.pop_back();
}
return;
}
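/// ComputeLatency - Compute the latency of an SUnit from the target's
/// instruction itineraries. The latency is the sum of the itinerary stage
/// cycles of the node and all nodes flagged with it; if no itinerary data is
/// available, a default latency of 1 is used.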
void ScheduleDAG::ComputeLatency(SUnit *SU) {
const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
// Compute the latency for the node. We use the sum of the latencies for
// all nodes flagged together into this SUnit.
if (InstrItins.isEmpty()) {
// No latency information.
SU->Latency = 1;
} else {
SU->Latency = 0;
if (SU->Node->isTargetOpcode()) {
unsigned SchedClass =
TII->get(SU->Node->getTargetOpcode()).getSchedClass();
InstrStage *S = InstrItins.begin(SchedClass);
InstrStage *E = InstrItins.end(SchedClass);
for (; S != E; ++S)
SU->Latency += S->Cycles;
}
for (unsigned i = 0, e = SU->FlaggedNodes.size(); i != e; ++i) {
SDNode *FNode = SU->FlaggedNodes[i];
if (FNode->isTargetOpcode()) {
unsigned SchedClass =TII->get(FNode->getTargetOpcode()).getSchedClass();
InstrStage *S = InstrItins.begin(SchedClass);
InstrStage *E = InstrItins.end(SchedClass);
for (; S != E; ++S)
SU->Latency += S->Cycles;
}
}
}
}
/// CalculateDepths - Compute the depth of each node via a longest-path
/// computation over the DAG.
void ScheduleDAG::CalculateDepths() {
unsigned DAGSize = SUnits.size();
std::vector<unsigned> InDegree(DAGSize);
std::vector<SUnit*> WorkList;
WorkList.reserve(DAGSize);
// Initialize the data structures
for (unsigned i = 0, e = DAGSize; i != e; ++i) {
SUnit *SU = &SUnits[i];
int NodeNum = SU->NodeNum;
unsigned Degree = SU->Preds.size();
InDegree[NodeNum] = Degree;
SU->Depth = 0;
// Is it a node without dependencies?
if (Degree == 0) {
assert(SU->Preds.empty() && "SUnit should have no predecessors");
// Collect leaf nodes
WorkList.push_back(SU);
}
}
// Process nodes in the topological order
while (!WorkList.empty()) {
SUnit *SU = WorkList.back();
WorkList.pop_back();
unsigned &SUDepth = SU->Depth;
// Use dynamic programming:
// When current node is being processed, all of its dependencies
// are already processed.
// So, just iterate over all predecessors and take the longest path
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
unsigned PredDepth = I->Dep->Depth;
if (PredDepth+1 > SUDepth) {
SUDepth = PredDepth + 1;
}
}
// Update InDegrees of all nodes depending on current SUnit
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
SUnit *SU = I->Dep;
if (!--InDegree[SU->NodeNum])
// If all dependencies of the node are processed already,
// then the longest path for the node can be computed now
WorkList.push_back(SU);
}
}
}
/// CalculateHeights - Compute the height of each node via a longest-path
/// computation over the DAG.
void ScheduleDAG::CalculateHeights() {
unsigned DAGSize = SUnits.size();
std::vector<unsigned> InDegree(DAGSize);
std::vector<SUnit*> WorkList;
WorkList.reserve(DAGSize);
// Initialize the data structures
for (unsigned i = 0, e = DAGSize; i != e; ++i) {
SUnit *SU = &SUnits[i];
int NodeNum = SU->NodeNum;
unsigned Degree = SU->Succs.size();
InDegree[NodeNum] = Degree;
SU->Height = 0;
// Is it a node without successors?
if (Degree == 0) {
assert(SU->Succs.empty() && "Something wrong");
assert(WorkList.empty() && "Should be empty");
// Collect leaf nodes
WorkList.push_back(SU);
}
}
// Process nodes in the topological order
while (!WorkList.empty()) {
SUnit *SU = WorkList.back();
WorkList.pop_back();
unsigned &SUHeight = SU->Height;
// Use dynamic programming:
// When current node is being processed, all of its dependencies
// are already processed.
// So, just iterate over all successors and take the longest path
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
unsigned SuccHeight = I->Dep->Height;
if (SuccHeight+1 > SUHeight) {
SUHeight = SuccHeight + 1;
}
}
// Update the degrees of all predecessors of the current SUnit
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
SUnit *SU = I->Dep;
if (!--InDegree[SU->NodeNum])
// If all dependencies of the node are processed already,
// then the longest path for the node can be computed now
WorkList.push_back(SU);
}
}
}
/// CountResults - The results of target nodes have register or immediate
/// operands first, then an optional chain, and optional flag operands (which do
/// not go into the resulting MachineInstr).
unsigned ScheduleDAG::CountResults(SDNode *Node) {
unsigned N = Node->getNumValues();
while (N && Node->getValueType(N - 1) == MVT::Flag)
--N;
if (N && Node->getValueType(N - 1) == MVT::Other)
--N; // Skip over chain result.
return N;
}
/// CountOperands - The inputs to target nodes have any actual inputs first,
/// followed by special operands that describe memory references, then an
/// optional chain operand, then flag operands. Compute the number of
/// actual operands that will go into the resulting MachineInstr.
unsigned ScheduleDAG::CountOperands(SDNode *Node) {
unsigned N = ComputeMemOperandsEnd(Node);
while (N && isa<MemOperandSDNode>(Node->getOperand(N - 1).Val))
--N; // Ignore MemOperand nodes
return N;
}
/// ComputeMemOperandsEnd - Find the index one past the last MemOperandSDNode
/// operand
unsigned ScheduleDAG::ComputeMemOperandsEnd(SDNode *Node) {
unsigned N = Node->getNumOperands();
while (N && Node->getOperand(N - 1).getValueType() == MVT::Flag)
--N;
if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
--N; // Ignore chain if it exists.
return N;
}
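/// getInstrOperandRegClass - Return the register class that operand Op of
/// the instruction described by II is constrained to. Pointer-like operands
/// map to the target's pointer register class; extra operands of variadic
/// instructions have no class and return NULL.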
static const TargetRegisterClass *getInstrOperandRegClass(
const TargetRegisterInfo *TRI,
const TargetInstrInfo *TII,
const TargetInstrDesc &II,
unsigned Op) {
if (Op >= II.getNumOperands()) {
assert(II.isVariadic() && "Invalid operand # of instruction");
return NULL;
}
if (II.OpInfo[Op].isLookupPtrRegClass())
return TII->getPointerRegClass();
return TRI->getRegClass(II.OpInfo[Op].RegClass);
}
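/// EmitCopyFromReg - Make result ResNo of Node available in a register and
/// record it in VRBaseMap. A virtual SrcReg is reused directly. A physical
/// SrcReg is reused only when every use reads the physical register and
/// copying it would be impossible or very expensive; otherwise it is copied
/// into a freshly created virtual register at the end of the block.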
void ScheduleDAG::EmitCopyFromReg(SDNode *Node, unsigned ResNo,
unsigned InstanceNo, unsigned SrcReg,
DenseMap<SDOperand, unsigned> &VRBaseMap) {
unsigned VRBase = 0;
if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
// Just use the input register directly!
if (InstanceNo > 0)
VRBaseMap.erase(SDOperand(Node, ResNo));
bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo),SrcReg));
assert(isNew && "Node emitted out of order - early");
return;
}
// If the node is only used by a CopyToReg and the dest reg is a vreg, use
// the CopyToReg'd destination register instead of creating a new vreg.
bool MatchReg = true;
for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
UI != E; ++UI) {
SDNode *Use = *UI;
bool Match = true;
if (Use->getOpcode() == ISD::CopyToReg &&
Use->getOperand(2).Val == Node &&
Use->getOperand(2).ResNo == ResNo) {
unsigned DestReg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
VRBase = DestReg;
Match = false;
} else if (DestReg != SrcReg)
Match = false;
} else {
for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
SDOperand Op = Use->getOperand(i);
if (Op.Val != Node || Op.ResNo != ResNo)
continue;
MVT::ValueType VT = Node->getValueType(Op.ResNo);
if (VT != MVT::Other && VT != MVT::Flag)
Match = false;
}
}
MatchReg &= Match;
if (VRBase)
break;
}
const TargetRegisterClass *SrcRC = 0, *DstRC = 0;
SrcRC = TRI->getPhysicalRegisterRegClass(SrcReg, Node->getValueType(ResNo));
// Figure out the register class to create for the destreg.
if (VRBase) {
DstRC = MRI.getRegClass(VRBase);
} else {
DstRC = DAG.getTargetLoweringInfo()
.getRegClassFor(Node->getValueType(ResNo));
}
// If all uses are reading from the src physical register and copying the
// register is either impossible or very expensive, then don't create a copy.
if (MatchReg && SrcRC->getCopyCost() < 0) {
VRBase = SrcReg;
} else {
// Create the reg, emit the copy.
VRBase = MRI.createVirtualRegister(DstRC);
TII->copyRegToReg(*BB, BB->end(), VRBase, SrcReg, DstRC, SrcRC);
}
if (InstanceNo > 0)
VRBaseMap.erase(SDOperand(Node, ResNo));
bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo), VRBase));
assert(isNew && "Node emitted out of order - early");
}
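/// CreateVirtualRegisters - Add a def operand to MI for each value defined
/// by Node. If a result is only used by a CopyToReg whose destination is a
/// virtual register, that register is reused; otherwise a new virtual
/// register of the appropriate class is created. Each result register is
/// recorded in VRBaseMap.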
void ScheduleDAG::CreateVirtualRegisters(SDNode *Node,
MachineInstr *MI,
const TargetInstrDesc &II,
DenseMap<SDOperand, unsigned> &VRBaseMap) {
for (unsigned i = 0; i < II.getNumDefs(); ++i) {
// If the specific node value is only used by a CopyToReg and the dest reg
// is a vreg, use the CopyToReg'd destination register instead of creating
// a new vreg.
unsigned VRBase = 0;
for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
UI != E; ++UI) {
SDNode *Use = *UI;
if (Use->getOpcode() == ISD::CopyToReg &&
Use->getOperand(2).Val == Node &&
Use->getOperand(2).ResNo == i) {
unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
VRBase = Reg;
MI->addOperand(MachineOperand::CreateReg(Reg, true));
break;
}
}
}
// Create the result registers for this node and add the result regs to
// the machine instruction.
if (VRBase == 0) {
const TargetRegisterClass *RC = getInstrOperandRegClass(TRI, TII, II, i);
assert(RC && "Isn't a register operand!");
VRBase = MRI.createVirtualRegister(RC);
MI->addOperand(MachineOperand::CreateReg(VRBase, true));
}
bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,i), VRBase));
assert(isNew && "Node emitted out of order - early");
}
}
/// getVR - Return the virtual register corresponding to the specified result
/// of the specified node.
static unsigned getVR(SDOperand Op, DenseMap<SDOperand, unsigned> &VRBaseMap) {
DenseMap<SDOperand, unsigned>::iterator I = VRBaseMap.find(Op);
assert(I != VRBaseMap.end() && "Node emitted out of order - late");
return I->second;
}
/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
/// operand number (in the II) that we are adding. IIOpNum and II are used for
/// assertions only.
void ScheduleDAG::AddOperand(MachineInstr *MI, SDOperand Op,
unsigned IIOpNum,
const TargetInstrDesc *II,
DenseMap<SDOperand, unsigned> &VRBaseMap) {
if (Op.isTargetOpcode()) {
// Note that this case is redundant with the final else block, but we
// include it because it is the most common and it makes the logic
// simpler here.
assert(Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Flag &&
"Chain and flag operands should occur at end of operand list!");
// Get/emit the operand.
unsigned VReg = getVR(Op, VRBaseMap);
const TargetInstrDesc &TID = MI->getDesc();
bool isOptDef = (IIOpNum < TID.getNumOperands())
? (TID.OpInfo[IIOpNum].isOptionalDef()) : false;
MI->addOperand(MachineOperand::CreateReg(VReg, isOptDef));
// Verify that it is right.
assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
#ifndef NDEBUG
if (II) {
// There may be no register class for this operand if it is a variadic
// argument (RC will be NULL in this case). In this case, we just assume
// the regclass is ok.
const TargetRegisterClass *RC =
getInstrOperandRegClass(TRI, TII, *II, IIOpNum);
assert((RC || II->isVariadic()) && "Expected reg class info!");
const TargetRegisterClass *VRC = MRI.getRegClass(VReg);
if (RC && VRC != RC) {
cerr << "Register class of operand and regclass of use don't agree!\n";
cerr << "Operand = " << IIOpNum << "\n";
cerr << "Op->Val = "; Op.Val->dump(&DAG); cerr << "\n";
cerr << "MI = "; MI->print(cerr);
cerr << "VReg = " << VReg << "\n";
cerr << "VReg RegClass size = " << VRC->getSize()
<< ", align = " << VRC->getAlignment() << "\n";
cerr << "Expected RegClass size = " << RC->getSize()
<< ", align = " << RC->getAlignment() << "\n";
cerr << "Fatal error, aborting.\n";
abort();
}
}
#endif
} else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateImm(C->getValue()));
} else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
const Type *FType = MVT::getTypeForValueType(Op.getValueType());
ConstantFP *CFP = ConstantFP::get(FType, F->getValueAPF());
MI->addOperand(MachineOperand::CreateFPImm(CFP));
} else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateReg(R->getReg(), false));
} else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateGA(TGA->getGlobal(),TGA->getOffset()));
} else if (BasicBlockSDNode *BB = dyn_cast<BasicBlockSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateMBB(BB->getBasicBlock()));
} else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateFI(FI->getIndex()));
} else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateJTI(JT->getIndex()));
} else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
int Offset = CP->getOffset();
unsigned Align = CP->getAlignment();
const Type *Type = CP->getType();
// MachineConstantPool wants an explicit alignment.
if (Align == 0) {
Align = TM.getTargetData()->getPreferredTypeAlignmentShift(Type);
if (Align == 0) {
// Alignment of vector types. FIXME!
Align = TM.getTargetData()->getABITypeSize(Type);
Align = Log2_64(Align);
}
}
unsigned Idx;
if (CP->isMachineConstantPoolEntry())
Idx = ConstPool->getConstantPoolIndex(CP->getMachineCPVal(), Align);
else
Idx = ConstPool->getConstantPoolIndex(CP->getConstVal(), Align);
MI->addOperand(MachineOperand::CreateCPI(Idx, Offset));
} else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateES(ES->getSymbol()));
} else {
assert(Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Flag &&
"Chain and flag operands should occur at end of operand list!");
unsigned VReg = getVR(Op, VRBaseMap);
MI->addOperand(MachineOperand::CreateReg(VReg, false));
// Verify that it is right. Note that the reg class of the physreg and the
// vreg don't necessarily need to match, but the target copy insertion has
// to be able to handle it. This handles things like copies from ST(0) to
// an FP vreg on x86.
assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
if (II && !II->isVariadic()) {
assert(getInstrOperandRegClass(TRI, TII, *II, IIOpNum) &&
"Don't have operand info for this instruction!");
}
}
}
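/// AddMemOperand - Attach the specified memory reference to MI.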
void ScheduleDAG::AddMemOperand(MachineInstr *MI, const MemOperand &MO) {
MI->addMemOperand(MO);
}
// Returns the register class of a subregister.
static const TargetRegisterClass *getSubRegisterRegClass(
const TargetRegisterClass *TRC,
unsigned SubIdx) {
// Pick the register class of the subregister
TargetRegisterInfo::regclass_iterator I =
TRC->subregclasses_begin() + SubIdx-1;
assert(I < TRC->subregclasses_end() &&
"Invalid subregister index for register class");
return *I;
}
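/// getSuperregRegisterClass - Return a register class that has type VT and
/// whose SubIdx'th subregister class is TRC, i.e. the class of a suitable
/// superregister of TRC.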
static const TargetRegisterClass *getSuperregRegisterClass(
const TargetRegisterClass *TRC,
unsigned SubIdx,
MVT::ValueType VT) {
// Pick the register class of the superregister for this type
for (TargetRegisterInfo::regclass_iterator I = TRC->superregclasses_begin(),
E = TRC->superregclasses_end(); I != E; ++I)
if ((*I)->hasType(VT) && getSubRegisterRegClass(*I, SubIdx) == TRC)
return *I;
assert(false && "Couldn't find the register class");
return 0;
}
/// EmitSubregNode - Generate machine code for subreg nodes.
///
void ScheduleDAG::EmitSubregNode(SDNode *Node,
DenseMap<SDOperand, unsigned> &VRBaseMap) {
unsigned VRBase = 0;
unsigned Opc = Node->getTargetOpcode();
if (Opc == TargetInstrInfo::EXTRACT_SUBREG) {
// If the node is only used by a CopyToReg and the dest reg is a vreg, use
// the CopyToReg'd destination register instead of creating a new vreg.
for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
UI != E; ++UI) {
SDNode *Use = *UI;
if (Use->getOpcode() == ISD::CopyToReg &&
Use->getOperand(2).Val == Node) {
unsigned DestReg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
VRBase = DestReg;
break;
}
}
}
unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getValue();
// TODO: If the node is a use of a CopyFromReg from a physical register,
// fold the extract into the copy now.
// Create the extract_subreg machine instruction.
MachineInstr *MI =
new MachineInstr(BB, TII->get(TargetInstrInfo::EXTRACT_SUBREG));
// Figure out the register class to create for the destreg.
unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
const TargetRegisterClass *TRC = MRI.getRegClass(VReg);
const TargetRegisterClass *SRC = getSubRegisterRegClass(TRC, SubIdx);
if (VRBase) {
// Grab the destination register
const TargetRegisterClass *DRC = MRI.getRegClass(VRBase);
assert(SRC && DRC && SRC == DRC &&
"Source subregister and destination must have the same class");
} else {
// Create the reg
assert(SRC && "Couldn't find source register class");
VRBase = MRI.createVirtualRegister(SRC);
}
// Add def, source, and subreg index
MI->addOperand(MachineOperand::CreateReg(VRBase, true));
AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap);
MI->addOperand(MachineOperand::CreateImm(SubIdx));
} else if (Opc == TargetInstrInfo::INSERT_SUBREG) {
SDOperand N0 = Node->getOperand(0);
SDOperand N1 = Node->getOperand(1);
SDOperand N2 = Node->getOperand(2);
unsigned SubReg = getVR(N1, VRBaseMap);
unsigned SubIdx = cast<ConstantSDNode>(N2)->getValue();
// TODO: Add tracking info to MachineRegisterInfo of which vregs are subregs
// to allow coalescing in the allocator
// If the node is only used by a CopyToReg and the dest reg is a vreg, use
// the CopyToReg'd destination register instead of creating a new vreg.
// If the CopyToReg'd destination register is physical, then fold the
// insert into the copy
for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
UI != E; ++UI) {
SDNode *Use = *UI;
if (Use->getOpcode() == ISD::CopyToReg &&
Use->getOperand(2).Val == Node) {
unsigned DestReg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
VRBase = DestReg;
break;
}
}
}
// Create the insert_subreg machine instruction.
MachineInstr *MI =
new MachineInstr(BB, TII->get(TargetInstrInfo::INSERT_SUBREG));
// Figure out the register class to create for the destreg.
const TargetRegisterClass *TRC = 0;
if (VRBase) {
TRC = MRI.getRegClass(VRBase);
} else {
TRC = getSuperregRegisterClass(MRI.getRegClass(SubReg), SubIdx,
Node->getValueType(0));
assert(TRC && "Couldn't determine register class for insert_subreg");
VRBase = MRI.createVirtualRegister(TRC); // Create the reg
}
MI->addOperand(MachineOperand::CreateReg(VRBase, true));
// If N0 is a constant then it indicates the insert is being done
// into a target specific constant value, not a register.
if (const ConstantSDNode *SD = dyn_cast<ConstantSDNode>(N0))
MI->addOperand(MachineOperand::CreateImm(SD->getValue()));
else
AddOperand(MI, N0, 0, 0, VRBaseMap);
// Add the subregister being inserted
AddOperand(MI, N1, 0, 0, VRBaseMap);
MI->addOperand(MachineOperand::CreateImm(SubIdx));
} else
assert(0 && "Node is not a subreg insert or extract");
bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,0), VRBase));
assert(isNew && "Node emitted out of order - early");
}
/// EmitNode - Generate machine code for a node and needed dependencies.
///
void ScheduleDAG::EmitNode(SDNode *Node, unsigned InstanceNo,
DenseMap<SDOperand, unsigned> &VRBaseMap) {
// If machine instruction
if (Node->isTargetOpcode()) {
unsigned Opc = Node->getTargetOpcode();
// Handle subreg insert/extract specially
if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
Opc == TargetInstrInfo::INSERT_SUBREG) {
EmitSubregNode(Node, VRBaseMap);
return;
}
const TargetInstrDesc &II = TII->get(Opc);
unsigned NumResults = CountResults(Node);
unsigned NodeOperands = CountOperands(Node);
unsigned MemOperandsEnd = ComputeMemOperandsEnd(Node);
unsigned NumMIOperands = NodeOperands + NumResults;
bool HasPhysRegOuts = (NumResults > II.getNumDefs()) &&
II.getImplicitDefs() != 0;
#ifndef NDEBUG
assert((II.getNumOperands() == NumMIOperands ||
HasPhysRegOuts || II.isVariadic()) &&
"#operands for dag node doesn't match .td file!");
#endif
// Create the new machine instruction.
MachineInstr *MI = new MachineInstr(II);
// Add result register values for things that are defined by this
// instruction.
if (NumResults)
CreateVirtualRegisters(Node, MI, II, VRBaseMap);
// Emit all of the actual operands of this instruction, adding them to the
// instruction as appropriate.
for (unsigned i = 0; i != NodeOperands; ++i)
AddOperand(MI, Node->getOperand(i), i+II.getNumDefs(), &II, VRBaseMap);
// Emit all of the memory operands of this instruction
for (unsigned i = NodeOperands; i != MemOperandsEnd; ++i)
AddMemOperand(MI, cast<MemOperandSDNode>(Node->getOperand(i))->MO);
// Commute node if it has been determined to be profitable.
if (CommuteSet.count(Node)) {
MachineInstr *NewMI = TII->commuteInstruction(MI);
if (NewMI == 0)
DOUT << "Sched: COMMUTING FAILED!\n";
else {
DOUT << "Sched: COMMUTED TO: " << *NewMI;
if (MI != NewMI) {
delete MI;
MI = NewMI;
}
++NumCommutes;
}
}
if (II.usesCustomDAGSchedInsertionHook())
// Insert this instruction into the basic block using a target-specific
// inserter, which may return a new basic block.
BB = DAG.getTargetLoweringInfo().EmitInstrWithCustomInserter(MI, BB);
else
BB->push_back(MI);
// Additional results must be physical register defs.
if (HasPhysRegOuts) {
for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
if (Node->hasAnyUseOfValue(i))
EmitCopyFromReg(Node, i, InstanceNo, Reg, VRBaseMap);
}
}
} else {
switch (Node->getOpcode()) {
default:
#ifndef NDEBUG
Node->dump(&DAG);
#endif
assert(0 && "This target-independent node should have been selected!");
case ISD::EntryToken: // fall thru
case ISD::TokenFactor:
case ISD::LABEL:
case ISD::DECLARE:
case ISD::SRCVALUE:
break;
case ISD::CopyToReg: {
unsigned SrcReg;
SDOperand SrcVal = Node->getOperand(2);
if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
SrcReg = R->getReg();
else
SrcReg = getVR(SrcVal, VRBaseMap);
unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
break;
const TargetRegisterClass *SrcTRC = 0, *DstTRC = 0;
// Get the register classes of the src/dst.
if (TargetRegisterInfo::isVirtualRegister(SrcReg))
SrcTRC = MRI.getRegClass(SrcReg);
else
SrcTRC = TRI->getPhysicalRegisterRegClass(SrcReg,SrcVal.getValueType());
if (TargetRegisterInfo::isVirtualRegister(DestReg))
DstTRC = MRI.getRegClass(DestReg);
else
DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
Node->getOperand(1).getValueType());
TII->copyRegToReg(*BB, BB->end(), DestReg, SrcReg, DstTRC, SrcTRC);
break;
}
case ISD::CopyFromReg: {
unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
EmitCopyFromReg(Node, 0, InstanceNo, SrcReg, VRBaseMap);
break;
}
case ISD::INLINEASM: {
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
--NumOps; // Ignore the flag operand.
// Create the inline asm machine instruction.
MachineInstr *MI =
new MachineInstr(BB, TII->get(TargetInstrInfo::INLINEASM));
// Add the asm string as an external symbol operand.
const char *AsmStr =
cast<ExternalSymbolSDNode>(Node->getOperand(1))->getSymbol();
MI->addOperand(MachineOperand::CreateES(AsmStr));
// Add all of the operand registers to the instruction.
for (unsigned i = 2; i != NumOps;) {
unsigned Flags = cast<ConstantSDNode>(Node->getOperand(i))->getValue();
unsigned NumVals = Flags >> 3;
MI->addOperand(MachineOperand::CreateImm(Flags));
++i; // Skip the ID value.
switch (Flags & 7) {
default: assert(0 && "Bad flags!");
case 1: // Use of register.
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
MI->addOperand(MachineOperand::CreateReg(Reg, false));
}
break;
case 2: // Def of register.
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
MI->addOperand(MachineOperand::CreateReg(Reg, true));
}
break;
case 3: { // Immediate.
for (; NumVals; --NumVals, ++i) {
if (ConstantSDNode *CS =
dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
MI->addOperand(MachineOperand::CreateImm(CS->getValue()));
} else if (GlobalAddressSDNode *GA =
dyn_cast<GlobalAddressSDNode>(Node->getOperand(i))) {
MI->addOperand(MachineOperand::CreateGA(GA->getGlobal(),
GA->getOffset()));
} else {
BasicBlockSDNode *BB =cast<BasicBlockSDNode>(Node->getOperand(i));
MI->addOperand(MachineOperand::CreateMBB(BB->getBasicBlock()));
}
}
break;
}
case 4: // Addressing mode.
// The addressing mode has been selected, just add all of the
// operands to the machine instruction.
for (; NumVals; --NumVals, ++i)
AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap);
break;
}
}
break;
}
}
}
}
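/// EmitNoop - Emit a target noop instruction at the end of the current
/// basic block.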
void ScheduleDAG::EmitNoop() {
TII->insertNoop(*BB, BB->end());
}
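/// EmitCrossRCCopy - Emit the copy for an SUnit that represents a
/// cross-register-class copy (an SUnit with no node). Depending on the
/// direction, either copy a previously emitted virtual register into the
/// destination physical register expected by a successor, or copy the source
/// physical register into a new virtual register recorded in VRBaseMap.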
void ScheduleDAG::EmitCrossRCCopy(SUnit *SU,
DenseMap<SUnit*, unsigned> &VRBaseMap) {
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl) continue; // ignore chain preds
if (!I->Dep->Node) {
// Copy to physical register.
DenseMap<SUnit*, unsigned>::iterator VRI = VRBaseMap.find(I->Dep);
assert(VRI != VRBaseMap.end() && "Node emitted out of order - late");
// Find the destination physical register.
unsigned Reg = 0;
for (SUnit::const_succ_iterator II = SU->Succs.begin(),
EE = SU->Succs.end(); II != EE; ++II) {
if (II->Reg) {
Reg = II->Reg;
break;
}
}
assert(Reg && "Unknown physical register!");
TII->copyRegToReg(*BB, BB->end(), Reg, VRI->second,
SU->CopyDstRC, SU->CopySrcRC);
} else {
// Copy from physical register.
assert(I->Reg && "Unknown physical register!");
unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase));
assert(isNew && "Node emitted out of order - early");
TII->copyRegToReg(*BB, BB->end(), VRBase, I->Reg,
SU->CopyDstRC, SU->CopySrcRC);
}
break;
}
}
/// regIsLive - Return true if the specified register is live due to a
/// live in copy.
static bool regIsLive(unsigned Reg, BitVector &LiveRegs,
const TargetRegisterInfo *TRI) {
if (LiveRegs[Reg])
return true;
for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
if (LiveRegs[*AS])
return true;
return false;
}
/// regIsClobbered - Return true if the specified register is defined in
/// between the two specified instructions.
static bool regIsClobbered(unsigned Reg, MachineBasicBlock *MBB,
MachineBasicBlock::iterator InsertPos,
MachineBasicBlock::iterator UsePos,
const TargetRegisterInfo *TRI) {
for (MachineBasicBlock::iterator I = InsertPos; I != UsePos; ++I) {
MachineInstr *MI = I;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isRegister() || !MO.isDef())
continue;
unsigned DefReg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(DefReg))
continue;
if (TRI->regsOverlap(DefReg, Reg))
return true;
}
}
return false;
}
/// EmitLiveInCopy - Emit a copy for a live-in physical register. If the
/// physical register has only a single copy use, then coalesce the copy
/// if possible. It returns the destination register of the emitted copy
/// if it is a physical register; otherwise it returns zero.
unsigned ScheduleDAG::EmitLiveInCopy(MachineBasicBlock *MBB,
MachineBasicBlock::iterator &InsertPos,
unsigned VirtReg, unsigned PhysReg,
const TargetRegisterClass *RC,
BitVector &LiveRegsBefore,
BitVector &LiveRegsAfter) {
unsigned NumUses = 0;
MachineInstr *UseMI = NULL;
for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(VirtReg),
UE = MRI.use_end(); UI != UE; ++UI) {
UseMI = &*UI;
if (++NumUses > 1)
break;
}
// If the number of uses is not one, or the use is not a move instruction,
// don't coalesce.
unsigned SrcReg, DstReg;
if (NumUses != 1 ||
!TII->isMoveInstr(*UseMI, SrcReg, DstReg)) {
TII->copyRegToReg(*MBB, InsertPos, VirtReg, PhysReg, RC, RC);
return 0;
}
// Coalesce away a virtual register to virtual register copy.
if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
TII->copyRegToReg(*MBB, InsertPos, DstReg, PhysReg, RC, RC);
if (&*InsertPos == UseMI) ++InsertPos;
MBB->erase(UseMI);
return 0;
}
// If the destination is a physical register, check if it's safe to
// coalesce. If there is a def of the register between the insertion point and
// the use, then it's not safe.
if (regIsClobbered(DstReg, MBB, InsertPos, UseMI, TRI)) {
TII->copyRegToReg(*MBB, InsertPos, VirtReg, PhysReg, RC, RC);
return 0;
}
// Also check already-processed livein copies to determine a safe location
// to insert the copy. E.g. suppose livein r0 has already been processed and
// we are now inserting the copy of r1 into vr1025, which will be coalesced
// to r0.
// vr1024 = r0
// <this is the insertion pt>
// ...
// It's safe to insert the copy from r1 to r0.
// vr1024 = r0
// r0 = r1
//
// However, if the livein r0 copy is coalesced to r1:
// r1 = r0
// <insertion pt>
// ...
// Then it's not safe to insert the copy from r1 to r0 at the insertion pt.
// Nor is it safe to insert it at the start of the MBB.
//
// But if livein r3 has already been processed and coalesced to r1:
// <begin of MBB> -- safe to insert here
// r1 = r3
// <insertion pt> -- not safe
// Then it's safe to insert at the start of the MBB.
if (regIsLive(DstReg, LiveRegsAfter, TRI)) {
if (regIsLive(PhysReg, LiveRegsBefore, TRI)) {
// FIXME: Still possible to find a safe place to insert the copy.
TII->copyRegToReg(*MBB, InsertPos, VirtReg, PhysReg, RC, RC);
return 0;
}
TII->copyRegToReg(*MBB, MBB->begin(), DstReg, PhysReg, RC, RC);
if (&*InsertPos == UseMI) ++InsertPos;
MBB->erase(UseMI);
return DstReg;
}
TII->copyRegToReg(*MBB, InsertPos, DstReg, PhysReg, RC, RC);
if (&*InsertPos == UseMI) ++InsertPos;
MBB->erase(UseMI);
return DstReg;
}
/// EmitLiveInCopies - If this is the first basic block in the function,
/// and if it has live ins that need to be copied into vregs, emit the
/// copies into the top of the block.
void ScheduleDAG::EmitLiveInCopies(MachineBasicBlock *MBB) {
BitVector LiveRegsBefore; // Live registers before insertion pt.
BitVector LiveRegsAfter; // Live registers after insertion pt.
LiveRegsBefore.resize(TRI->getNumRegs());
LiveRegsAfter.resize(TRI->getNumRegs());
MachineBasicBlock::iterator InsertPos = MBB->begin();
for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
E = MRI.livein_end(); LI != E; ++LI)
if (LI->second) {
const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
unsigned Reg = EmitLiveInCopy(MBB, InsertPos, LI->second, LI->first, RC,
LiveRegsBefore, LiveRegsAfter);
if (Reg) {
LiveRegsAfter.set(Reg);
for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs)
LiveRegsAfter.set(SubReg);
}
LiveRegsBefore.set(LI->first);
for (const unsigned *SubRegs = TRI->getSubRegisters(LI->first);
unsigned SubReg = *SubRegs; ++SubRegs)
LiveRegsBefore.set(SubReg);
}
}
/// EmitSchedule - Emit the machine code in scheduled order.
void ScheduleDAG::EmitSchedule() {
bool isEntryBB = &MF->front() == BB;
if (isEntryBB && !SchedLiveInCopies) {
// If this is the first basic block in the function, and if it has live ins
// that need to be copied into vregs, emit the copies into the top of the
// block before emitting the code for the block.
for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
E = MRI.livein_end(); LI != E; ++LI)
if (LI->second) {
const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
TII->copyRegToReg(*MF->begin(), MF->begin()->end(), LI->second,
LI->first, RC, RC);
}
}
// Finally, emit the code for all of the scheduled instructions.
DenseMap<SDOperand, unsigned> VRBaseMap;
DenseMap<SUnit*, unsigned> CopyVRBaseMap;
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
if (SUnit *SU = Sequence[i]) {
for (unsigned j = 0, ee = SU->FlaggedNodes.size(); j != ee; ++j)
EmitNode(SU->FlaggedNodes[j], SU->InstanceNo, VRBaseMap);
if (SU->Node)
EmitNode(SU->Node, SU->InstanceNo, VRBaseMap);
else
EmitCrossRCCopy(SU, CopyVRBaseMap);
} else {
// Null SUnit* is a noop.
EmitNoop();
}
}
if (isEntryBB && SchedLiveInCopies)
EmitLiveInCopies(MF->begin());
}
/// dump - dump the schedule.
void ScheduleDAG::dumpSchedule() const {
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
if (SUnit *SU = Sequence[i])
SU->dump(&DAG);
else
cerr << "**** NOOP ****\n";
}
}
/// Run - perform scheduling.
///
MachineBasicBlock *ScheduleDAG::Run() {
Schedule();
return BB;
}
/// SUnit - Scheduling unit. It's a wrapper around either a single SDNode or
/// a group of nodes flagged together.
void SUnit::dump(const SelectionDAG *G) const {
cerr << "SU(" << NodeNum << "): ";
if (Node)
Node->dump(G);
else
cerr << "CROSS RC COPY ";
cerr << "\n";
if (FlaggedNodes.size() != 0) {
for (unsigned i = 0, e = FlaggedNodes.size(); i != e; i++) {
cerr << " ";
FlaggedNodes[i]->dump(G);
cerr << "\n";
}
}
}
void SUnit::dumpAll(const SelectionDAG *G) const {
dump(G);
cerr << " # preds left : " << NumPredsLeft << "\n";
cerr << " # succs left : " << NumSuccsLeft << "\n";
cerr << " Latency : " << Latency << "\n";
cerr << " Depth : " << Depth << "\n";
cerr << " Height : " << Height << "\n";
if (Preds.size() != 0) {
cerr << " Predecessors:\n";
for (SUnit::const_pred_iterator I = Preds.begin(), E = Preds.end();
I != E; ++I) {
if (I->isCtrl)
cerr << " ch #";
else
cerr << " val #";
cerr << I->Dep << " - SU(" << I->Dep->NodeNum << ")";
if (I->isSpecial)
cerr << " *";
cerr << "\n";
}
}
if (Succs.size() != 0) {
cerr << " Successors:\n";
for (SUnit::const_succ_iterator I = Succs.begin(), E = Succs.end();
I != E; ++I) {
if (I->isCtrl)
cerr << " ch #";
else
cerr << " val #";
cerr << I->Dep << " - SU(" << I->Dep->NodeNum << ")";
if (I->isSpecial)
cerr << " *";
cerr << "\n";
}
}
cerr << "\n";
}