Revert 107840 107839 107813 107804 107800 107797 107791.

Debug info intrinsics win for now.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@107850 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Dan Gohman
Date:   2010-07-08 01:00:56 +00:00
Commit: f595141525 (parent d9642faf7c)

22 changed files with 367 additions and 540 deletions

--- a/include/llvm/CodeGen/CallingConvLower.h
+++ b/include/llvm/CodeGen/CallingConvLower.h

@@ -188,7 +188,8 @@ public:
   /// CheckReturn - Analyze the return values of a function, returning
   /// true if the return can be performed without sret-demotion, and
   /// false otherwise.
-  bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+  bool CheckReturn(const SmallVectorImpl<EVT> &OutTys,
+                   const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                    CCAssignFn Fn);
 
   /// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
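For context, a minimal sketch of how a backend typically drives the restored
two-vector CheckReturn from its CanLowerReturn override. The CCState
constructor arguments and RetCC_X86 are assumptions drawn from the LLVM API of
this era, not code from this commit:

    // Hypothetical backend override (a sketch, not from this commit).
    bool X86TargetLowering::CanLowerReturn(
        CallingConv::ID CallConv, bool isVarArg,
        const SmallVectorImpl<EVT> &OutTys,
        const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
        LLVMContext &Context) const {
      SmallVector<CCValAssign, 16> RVLocs;
      CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, Context);
      // True means every return value fits in registers, i.e. no
      // sret-demotion is required.
      return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_X86);
    }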

--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h

@@ -19,7 +19,6 @@
 #include "llvm/ADT/SmallSet.h"
 #endif
 #include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
 
 namespace llvm {
 
@@ -45,6 +44,7 @@ class TargetRegisterInfo;
 /// lowering, but runs quickly.
 class FastISel {
 protected:
+  MachineBasicBlock *MBB;
   DenseMap<const Value *, unsigned> LocalValueMap;
   FunctionLoweringInfo &FuncInfo;
   MachineRegisterInfo &MRI;
@@ -56,17 +56,23 @@ protected:
   const TargetInstrInfo &TII;
   const TargetLowering &TLI;
   const TargetRegisterInfo &TRI;
-  MachineBasicBlock::iterator LastLocalValue;
+  bool IsBottomUp;
 
 public:
-  /// getLastLocalValue - Return the position of the last instruction
-  /// emitted for materializing constants for use in the current block.
-  MachineBasicBlock::iterator getLastLocalValue() { return LastLocalValue; }
-
   /// startNewBlock - Set the current block to which generated machine
   /// instructions will be appended, and clear the local CSE map.
   ///
-  void startNewBlock();
+  void startNewBlock(MachineBasicBlock *mbb) {
+    setCurrentBlock(mbb);
+    LocalValueMap.clear();
+  }
+
+  /// setCurrentBlock - Set the current block to which generated machine
+  /// instructions will be appended.
+  ///
+  void setCurrentBlock(MachineBasicBlock *mbb) {
+    MBB = mbb;
+  }
 
   /// getCurDebugLoc() - Return current debug location information.
   DebugLoc getCurDebugLoc() const { return DL; }
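A rough sketch of the caller contract behind this startNewBlock/setCurrentBlock
pair, abbreviated from the SelectionDAGISel::SelectAllBasicBlocks loop later in
this diff (variable names follow that loop):

    FastIS->startNewBlock(BB);      // point FastISel at BB, clear local CSE map
    for (; BI != End; ++BI) {
      if (FastIS->SelectInstruction(BI))
        continue;                   // the fast path handled the instruction
      // Fall back to the DAG selector, which may hand back a new block...
      BB = SelectBasicBlock(BB, BI, llvm::next(BI), HadTailCall);
      FastIS->setCurrentBlock(BB);  // ...so tell FastISel where to resume
    }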

--- a/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h

@@ -25,7 +25,6 @@
 #endif
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/CodeGen/ISDOpcodes.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/Support/CallSite.h"
 #include <vector>
 
@@ -81,12 +80,6 @@ public:
   /// function arguments that are inserted after scheduling is completed.
   SmallVector<MachineInstr*, 8> ArgDbgValues;
 
-  /// MBB - The current block.
-  MachineBasicBlock *MBB;
-
-  /// MBB - The current insert position inside the current block.
-  MachineBasicBlock::iterator InsertPt;
-
 #ifndef NDEBUG
   SmallSet<const Instruction *, 8> CatchInfoLost;
   SmallSet<const Instruction *, 8> CatchInfoFound;

--- a/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/include/llvm/CodeGen/SelectionDAGISel.h

@@ -280,14 +280,15 @@ private:
   SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
                     const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo);
 
-  void PrepareEHLandingPad();
+  void PrepareEHLandingPad(MachineBasicBlock *BB);
   void SelectAllBasicBlocks(const Function &Fn);
-  void FinishBasicBlock();
+  void FinishBasicBlock(MachineBasicBlock *BB);
 
-  void SelectBasicBlock(BasicBlock::const_iterator Begin,
-                        BasicBlock::const_iterator End,
-                        bool &HadTailCall);
-  void CodeGenAndEmitDAG();
+  MachineBasicBlock *SelectBasicBlock(MachineBasicBlock *BB,
+                                      BasicBlock::const_iterator Begin,
+                                      BasicBlock::const_iterator End,
+                                      bool &HadTailCall);
+  MachineBasicBlock *CodeGenAndEmitDAG(MachineBasicBlock *BB);
   void LowerArguments(const BasicBlock *BB);
   void ComputeLiveOutVRegInfo();

--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h

@@ -24,7 +24,6 @@
 #include "llvm/CallingConv.h"
 #include "llvm/InlineAsm.h"
-#include "llvm/Attributes.h"
 #include "llvm/CodeGen/SelectionDAGNodes.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/ADT/APFloat.h"
@@ -1160,7 +1159,8 @@ public:
   /// registers. If false is returned, an sret-demotion is performed.
   ///
   virtual bool CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-               const SmallVectorImpl<ISD::OutputArg> &Outs,
+               const SmallVectorImpl<EVT> &OutTys,
+               const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                LLVMContext &Context) const
   {
     // Return true by default to get preexisting behavior.
@@ -1656,15 +1656,6 @@ protected:
   /// optimization.
   bool benefitFromCodePlacementOpt;
 };
 
-/// GetReturnInfo - Given an LLVM IR type and return type attributes,
-/// compute the return value EVTs and flags, and optionally also
-/// the offsets, if the return value is being lowered to memory.
-void GetReturnInfo(const Type* ReturnType, Attributes attr,
-                   SmallVectorImpl<ISD::OutputArg> &Outs,
-                   const TargetLowering &TLI,
-                   SmallVectorImpl<uint64_t> *Offsets = 0);
-
 } // end llvm namespace
 
 #endif

--- a/lib/CodeGen/CallingConvLower.cpp
+++ b/lib/CodeGen/CallingConvLower.cpp

@@ -80,12 +80,13 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
 
 /// CheckReturn - Analyze the return values of a function, returning true if
 /// the return can be performed without sret-demotion, and false otherwise.
-bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+bool CCState::CheckReturn(const SmallVectorImpl<EVT> &OutTys,
+                          const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                           CCAssignFn Fn) {
   // Determine which register each value should be copied into.
-  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
-    EVT VT = Outs[i].VT;
-    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+  for (unsigned i = 0, e = OutTys.size(); i != e; ++i) {
+    EVT VT = OutTys[i];
+    ISD::ArgFlagsTy ArgFlags = ArgsFlags[i];
     if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
       return false;
   }

--- a/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/lib/CodeGen/LLVMTargetMachine.cpp

@@ -329,15 +329,19 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
   if (OptLevel != CodeGenOpt::None)
     PM.add(createOptimizePHIsPass());
 
-  if (OptLevel != CodeGenOpt::None) {
-    // With optimization, dead code should already be eliminated. However
-    // there is one known exception: lowered code for arguments that are only
-    // used by tail calls, where the tail calls reuse the incoming stack
-    // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
-    PM.add(createDeadMachineInstructionElimPass());
-    printAndVerify(PM, "After codegen DCE pass",
-                   /* allowDoubleDefs= */ true);
+  // Delete dead machine instructions regardless of optimization level.
+  //
+  // At -O0, fast-isel frequently creates dead instructions.
+  //
+  // With optimization, dead code should already be eliminated. However
+  // there is one known exception: lowered code for arguments that are only
+  // used by tail calls, where the tail calls reuse the incoming stack
+  // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
+  PM.add(createDeadMachineInstructionElimPass());
+  printAndVerify(PM, "After codegen DCE pass",
+                 /* allowDoubleDefs= */ true);
 
+  if (OptLevel != CodeGenOpt::None) {
     PM.add(createOptimizeExtsPass());
     if (!DisableMachineLICM)
       PM.add(createMachineLICMPass());

--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp

@@ -57,17 +57,6 @@
 #include "llvm/Support/ErrorHandling.h"
 using namespace llvm;
 
-/// startNewBlock - Set the current block to which generated machine
-/// instructions will be appended, and clear the local CSE map.
-///
-void FastISel::startNewBlock() {
-  LocalValueMap.clear();
-
-  // Start out as end(), meaining no local-value instructions have
-  // been emitted.
-  LastLocalValue = FuncInfo.MBB->end();
-}
-
 bool FastISel::hasTrivialKill(const Value *V) const {
   // Don't consider constants or arguments to have trivial kills.
   const Instruction *I = dyn_cast<Instruction>(V);
@@ -120,11 +109,12 @@ unsigned FastISel::getRegForValue(const Value *V) {
 
   // In bottom-up mode, just create the virtual register which will be used
   // to hold the value. It will be materialized later.
-  if (isa<Instruction>(V) &&
-      (!isa<AllocaInst>(V) ||
-       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V)))) {
+  if (IsBottomUp) {
     Reg = createResultReg(TLI.getRegClassFor(VT));
-    FuncInfo.ValueMap[V] = Reg;
+    if (isa<Instruction>(V))
+      FuncInfo.ValueMap[V] = Reg;
+    else
+      LocalValueMap[V] = Reg;
     return Reg;
   }
 
@@ -179,8 +169,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
     Reg = lookUpRegForValue(Op);
   } else if (isa<UndefValue>(V)) {
     Reg = createResultReg(TLI.getRegClassFor(VT));
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
+    BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
   }
 
   // If target-independent code couldn't handle the value, give target-specific
@@ -190,10 +179,8 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
 
   // Don't cache constant materializations in the general ValueMap.
   // To do so would require tracking what uses they dominate.
-  if (Reg != 0) {
+  if (Reg != 0)
     LocalValueMap[V] = Reg;
-    LastLocalValue = MRI.getVRegDef(Reg);
-  }
 
   return Reg;
 }
@@ -222,20 +209,12 @@ unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
 
   unsigned &AssignedReg = FuncInfo.ValueMap[I];
   if (AssignedReg == 0)
-    // Use the new register.
     AssignedReg = Reg;
   else if (Reg != AssignedReg) {
-    // We already have a register for this value. Replace uses of
-    // the existing register with uses of the new one.
-    MRI.replaceRegWith(AssignedReg, Reg);
-
-    // Replace uses of the existing register in PHINodesToUpdate too.
-    for (unsigned i = 0, e = FuncInfo.PHINodesToUpdate.size(); i != e; ++i)
-      if (FuncInfo.PHINodesToUpdate[i].second == AssignedReg)
-        FuncInfo.PHINodesToUpdate[i].second = Reg;
-
-    // And update the ValueMap.
-    AssignedReg = Reg;
+    const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
+    TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
+                     Reg, RegClass, RegClass, DL);
   }
 
   return AssignedReg;
 }
@@ -455,28 +434,23 @@ bool FastISel::SelectCall(const User *I) {
     if (!V) {
       // Currently the optimizer can produce this; insert an undef to
       // help debugging.  Probably the optimizer should not do this.
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addReg(0U).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
+                                        addMetadata(DI->getVariable());
     } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
+                                        addMetadata(DI->getVariable());
     } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addFPImm(CF).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
+                                        addMetadata(DI->getVariable());
     } else if (unsigned Reg = lookUpRegForValue(V)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
+                                        addMetadata(DI->getVariable());
     } else {
       // We can't yet handle anything else here because it would require
       // generating code, thus altering codegen because of debug info.
       // Insert an undef so we can see what we dropped.
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addReg(0U).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
+                                        addMetadata(DI->getVariable());
     }
     return true;
   }
@@ -485,13 +459,12 @@ bool FastISel::SelectCall(const User *I) {
     switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
     default: break;
     case TargetLowering::Expand: {
-      assert(FuncInfo.MBB->isLandingPad() &&
-             "Call to eh.exception not in landing pad!");
+      assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
       unsigned Reg = TLI.getExceptionAddressRegister();
       const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
       unsigned ResultReg = createResultReg(RC);
-      bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                           ResultReg, Reg, RC, RC, DL);
+      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                           Reg, RC, RC, DL);
       assert(InsertedCopy && "Can't copy address registers!");
       InsertedCopy = InsertedCopy;
       UpdateValueMap(I, ResultReg);
@@ -505,23 +478,23 @@ bool FastISel::SelectCall(const User *I) {
     switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
     default: break;
     case TargetLowering::Expand: {
-      if (FuncInfo.MBB->isLandingPad())
-        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
+      if (MBB->isLandingPad())
+        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), MBB);
       else {
 #ifndef NDEBUG
         FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
 #endif
         // FIXME: Mark exception selector register as live in.  Hack for PR1508.
         unsigned Reg = TLI.getExceptionSelectorRegister();
-        if (Reg) FuncInfo.MBB->addLiveIn(Reg);
+        if (Reg) MBB->addLiveIn(Reg);
       }
 
       unsigned Reg = TLI.getExceptionSelectorRegister();
       EVT SrcVT = TLI.getPointerTy();
       const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
       unsigned ResultReg = createResultReg(RC);
-      bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                           ResultReg, Reg, RC, RC, DL);
+      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
+                                           RC, RC, DL);
       assert(InsertedCopy && "Can't copy address registers!");
       InsertedCopy = InsertedCopy;
@@ -640,9 +613,8 @@ bool FastISel::SelectBitCast(const User *I) {
     TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
     ResultReg = createResultReg(DstClass);
 
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, Op0,
-                                         DstClass, SrcClass, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         Op0, DstClass, SrcClass, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -690,14 +662,13 @@
 /// the CFG.
 void
 FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
-  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
+  if (MBB->isLayoutSuccessor(MSucc)) {
     // The unconditional fall-through case, which needs no instructions.
   } else {
     // The unconditional branch case.
-    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
-                     SmallVector<MachineOperand, 0>(), DL);
+    TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>(), DL);
   }
-  FuncInfo.MBB->addSuccessor(MSucc);
+  MBB->addSuccessor(MSucc);
 }
 
 /// SelectFNeg - Emit an FNeg operation.
@@ -756,15 +727,11 @@ FastISel::SelectLoad(const User *I) {
       BasicBlock::iterator ScanFrom = LI;
       if (const Value *V = FindAvailableLoadedValue(LI->getPointerOperand(),
                                                     LI->getParent(), ScanFrom)) {
-        if (!isa<Instruction>(V) ||
-            cast<Instruction>(V)->getParent() == LI->getParent() ||
-            (isa<AllocaInst>(V) && FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V)))) {
         unsigned ResultReg = getRegForValue(V);
         if (ResultReg != 0) {
           UpdateValueMap(I, ResultReg);
           return true;
         }
-        }
       }
     }
@@ -887,7 +854,8 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
 }
 
 FastISel::FastISel(FunctionLoweringInfo &funcInfo)
-  : FuncInfo(funcInfo),
+  : MBB(0),
+    FuncInfo(funcInfo),
     MRI(FuncInfo.MF->getRegInfo()),
     MFI(*FuncInfo.MF->getFrameInfo()),
     MCP(*FuncInfo.MF->getConstantPool()),
@@ -895,7 +863,8 @@ FastISel::FastISel(FunctionLoweringInfo &funcInfo)
     TD(*TM.getTargetData()),
     TII(*TM.getInstrInfo()),
     TLI(*TM.getTargetLowering()),
-    TRI(*TM.getRegisterInfo()) {
+    TRI(*TM.getRegisterInfo()),
+    IsBottomUp(false) {
 }
 
 FastISel::~FastISel() {}
@@ -1024,7 +993,7 @@ unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
   unsigned ResultReg = createResultReg(RC);
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
+  BuildMI(MBB, DL, II, ResultReg);
   return ResultReg;
 }
@@ -1035,14 +1004,11 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
-      .addReg(Op0, Op0IsKill * RegState::Kill);
+    BuildMI(MBB, DL, II, ResultReg).addReg(Op0, Op0IsKill * RegState::Kill);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-      .addReg(Op0, Op0IsKill * RegState::Kill);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    BuildMI(MBB, DL, II).addReg(Op0, Op0IsKill * RegState::Kill);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1058,16 +1024,15 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addReg(Op1, Op1IsKill * RegState::Kill);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
       .addReg(Op0, Op0IsKill * RegState::Kill)
       .addReg(Op1, Op1IsKill * RegState::Kill);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1082,16 +1047,15 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1106,16 +1070,15 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1131,18 +1094,17 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }
@@ -1156,12 +1118,11 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
   const TargetInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
+    BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    BuildMI(MBB, DL, II).addImm(Imm);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
      ResultReg = 0;
  }
@@ -1177,16 +1138,15 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
   const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG);
 
   if (II.getNumDefs() >= 1)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+    BuildMI(MBB, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Idx);
   else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+    BuildMI(MBB, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Idx);
-    bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                         ResultReg, II.ImplicitDefs[0],
-                                         RC, RC, DL);
+    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+                                         II.ImplicitDefs[0], RC, RC, DL);
     if (!InsertedCopy)
       ResultReg = 0;
   }

--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp

@@ -78,13 +78,6 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
   MF = &mf;
   RegInfo = &MF->getRegInfo();
 
-  // Check whether the function can return without sret-demotion.
-  SmallVector<ISD::OutputArg, 4> Outs;
-  GetReturnInfo(Fn->getReturnType(),
-                Fn->getAttributes().getRetAttributes(), Outs, TLI);
-  CanLowerReturn = TLI.CanLowerReturn(Fn->getCallingConv(), Fn->isVarArg(),
-                                      Outs, Fn->getContext());
-
   // Create a vreg for each argument register that is not dead and is used
   // outside of the entry block for the function.
   for (Function::const_arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();

--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp

@@ -732,11 +732,8 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
   if (II.usesCustomInsertionHook()) {
     // Insert this instruction into the basic block using a target
     // specific inserter which may returns a new basic block.
-    MachineBasicBlock *NewMBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
-    if (NewMBB != MBB) {
-      MBB = NewMBB;
-      InsertPos = NewMBB->end();
-    }
+    MBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
+    InsertPos = MBB->end();
     return;
   }

--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

@@ -951,16 +951,79 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
 
   // If this is an instruction which fast-isel has deferred, select it now.
   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
-    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
-    RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
-    SDValue Chain = DAG.getEntryNode();
-    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL);
+    assert(Inst->isSafeToSpeculativelyExecute() &&
+           "Instruction with side effects deferred!");
+    visit(*Inst);
+    DenseMap<const Value *, SDValue>::iterator NIt = NodeMap.find(Inst);
+    if (NIt != NodeMap.end() && NIt->second.getNode())
+      return NIt->second;
   }
 
   llvm_unreachable("Can't get register for value!");
   return SDValue();
 }
 
+/// Get the EVTs and ArgFlags collections that represent the legalized return
+/// type of the given function.  This does not require a DAG or a return value,
+/// and is suitable for use before any DAGs for the function are constructed.
+static void getReturnInfo(const Type* ReturnType,
+                          Attributes attr, SmallVectorImpl<EVT> &OutVTs,
+                          SmallVectorImpl<ISD::ArgFlagsTy> &OutFlags,
+                          const TargetLowering &TLI,
+                          SmallVectorImpl<uint64_t> *Offsets = 0) {
+  SmallVector<EVT, 4> ValueVTs;
+  ComputeValueVTs(TLI, ReturnType, ValueVTs);
+  unsigned NumValues = ValueVTs.size();
+  if (NumValues == 0) return;
+  unsigned Offset = 0;
+
+  for (unsigned j = 0, f = NumValues; j != f; ++j) {
+    EVT VT = ValueVTs[j];
+    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
+
+    if (attr & Attribute::SExt)
+      ExtendKind = ISD::SIGN_EXTEND;
+    else if (attr & Attribute::ZExt)
+      ExtendKind = ISD::ZERO_EXTEND;
+
+    // FIXME: C calling convention requires the return type to be promoted to
+    // at least 32-bit. But this is not necessary for non-C calling
+    // conventions. The frontend should mark functions whose return values
+    // require promoting with signext or zeroext attributes.
+    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
+      EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
+      if (VT.bitsLT(MinVT))
+        VT = MinVT;
+    }
+
+    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
+    EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
+    unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
+                        PartVT.getTypeForEVT(ReturnType->getContext()));
+
+    // 'inreg' on function refers to return value
+    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
+    if (attr & Attribute::InReg)
+      Flags.setInReg();
+
+    // Propagate extension type if any
+    if (attr & Attribute::SExt)
+      Flags.setSExt();
+    else if (attr & Attribute::ZExt)
+      Flags.setZExt();
+
+    for (unsigned i = 0; i < NumParts; ++i) {
+      OutVTs.push_back(PartVT);
+      OutFlags.push_back(Flags);
+      if (Offsets)
+      {
+        Offsets->push_back(Offset);
+        Offset += PartSize;
+      }
+    }
+  }
+}
+
 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
   SDValue Chain = getControlRoot();
   SmallVector<ISD::OutputArg, 8> Outs;
@@ -1257,7 +1320,7 @@ SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
 }
 
 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
-  MachineBasicBlock *BrMBB = FuncInfo.MBB;
+  MachineBasicBlock *BrMBB = FuncInfo.MBBMap[I.getParent()];
 
   // Update machine-CFG edges.
   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
@@ -1583,7 +1646,7 @@ void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
 }
 
 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
-  MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
+  MachineBasicBlock *InvokeMBB = FuncInfo.MBBMap[I.getParent()];
 
   // Retrieve successors.
   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
@@ -2111,7 +2174,7 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
 }
 
 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
-  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
+  MachineBasicBlock *SwitchMBB = FuncInfo.MBBMap[SI.getParent()];
 
   // Figure out which block is immediately after the current one.
   MachineBasicBlock *NextBlock = 0;
@@ -2177,7 +2240,7 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
 }
 
 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
-  MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
+  MachineBasicBlock *IndirectBrMBB = FuncInfo.MBBMap[I.getParent()];
 
   // Update machine-CFG edges with unique successors.
   SmallVector<BasicBlock*, 32> succs;
@@ -3837,7 +3900,7 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const DbgValueInst &DI,
   if (DV.isInlinedFnArgument(MF.getFunction()))
     return false;
 
-  MachineBasicBlock *MBB = FuncInfo.MBB;
+  MachineBasicBlock *MBB = FuncInfo.MBBMap[DI.getParent()];
   if (MBB != &MF.front())
     return false;
 
@@ -4100,7 +4163,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
   }
   case Intrinsic::eh_exception: {
     // Insert the EXCEPTIONADDR instruction.
-    assert(FuncInfo.MBB->isLandingPad() &&
+    assert(FuncInfo.MBBMap[I.getParent()]->isLandingPad() &&
            "Call to eh.exception not in landing pad!");
     SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
     SDValue Ops[1];
@@ -4112,7 +4175,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
   }
 
   case Intrinsic::eh_selector: {
-    MachineBasicBlock *CallMBB = FuncInfo.MBB;
+    MachineBasicBlock *CallMBB = FuncInfo.MBBMap[I.getParent()];
     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
     if (CallMBB->isLandingPad())
       AddCatchInfo(I, &MMI, CallMBB);
@@ -4122,7 +4185,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
 #endif
       // FIXME: Mark exception selector register as live in.  Hack for PR1508.
       unsigned Reg = TLI.getExceptionSelectorRegister();
-      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
+      if (Reg) FuncInfo.MBBMap[I.getParent()]->addLiveIn(Reg);
     }
 
     // Insert the EHSELECTION instruction.
@@ -4496,13 +4559,14 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
   Args.reserve(CS.arg_size());
 
   // Check whether the function can return without sret-demotion.
-  SmallVector<ISD::OutputArg, 4> Outs;
+  SmallVector<EVT, 4> OutVTs;
+  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
   SmallVector<uint64_t, 4> Offsets;
-  GetReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
-                Outs, TLI, &Offsets);
+  getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
+                OutVTs, OutsFlags, TLI, &Offsets);
 
   bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
-                        FTy->isVarArg(), Outs, FTy->getContext());
+                        FTy->isVarArg(), OutVTs, OutsFlags, FTy->getContext());
 
   SDValue DemoteStackSlot;
@@ -4595,7 +4659,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
     ComputeValueVTs(TLI, PtrRetTy, PVTs);
     assert(PVTs.size() == 1 && "Pointers should fit in one register");
     EVT PtrVT = PVTs[0];
-    unsigned NumValues = Outs.size();
+    unsigned NumValues = OutVTs.size();
     SmallVector<SDValue, 4> Values(NumValues);
     SmallVector<SDValue, 4> Chains(NumValues);
@@ -4603,7 +4667,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
       SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT,
                                 DemoteStackSlot,
                                 DAG.getConstant(Offsets[i], PtrVT));
-      SDValue L = DAG.getLoad(Outs[i].VT, getCurDebugLoc(), Result.second,
+      SDValue L = DAG.getLoad(OutVTs[i], getCurDebugLoc(), Result.second,
                               Add, NULL, Offsets[i], false, false, 1);
       Values[i] = L;
       Chains[i] = L.getValue(1);
@@ -5895,10 +5959,15 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
   SmallVector<ISD::InputArg, 16> Ins;
 
   // Check whether the function can return without sret-demotion.
-  SmallVector<ISD::OutputArg, 4> Outs;
-  GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
-                Outs, TLI);
+  SmallVector<EVT, 4> OutVTs;
+  SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
+  getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
+                OutVTs, OutsFlags, TLI);
+
+  FuncInfo->CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(),
+                                                F.isVarArg(),
+                                                OutVTs, OutsFlags,
+                                                F.getContext());
 
   if (!FuncInfo->CanLowerReturn) {
     // Put in an sret pointer parameter before all the other parameters.
     SmallVector<EVT, 1> ValueVTs;
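A worked example of what the restored getReturnInfo produces, assuming a
32-bit target where i64 legalizes to two i32 parts (the target and sizes are
illustrative assumptions, not part of this commit):

    // For "i64 @f()" with no return attributes on a 32-bit target:
    //   ValueVTs = { i64 }           // from ComputeValueVTs
    //   NumParts = 2, PartVT = i32   // getNumRegisters / getRegisterType
    // so getReturnInfo appends one entry per register part:
    //   OutVTs   = { i32, i32 }
    //   OutFlags = { {}, {} }        // empty ISD::ArgFlagsTy
    //   Offsets  = { 0, 4 }          // if requested; PartSize == 4 bytes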

--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp

@ -319,8 +319,9 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
return true; return true;
} }
void MachineBasicBlock *
SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin, SelectionDAGISel::SelectBasicBlock(MachineBasicBlock *BB,
BasicBlock::const_iterator Begin,
BasicBlock::const_iterator End, BasicBlock::const_iterator End,
bool &HadTailCall) { bool &HadTailCall) {
// Lower all of the non-terminator instructions. If a call is emitted // Lower all of the non-terminator instructions. If a call is emitted
@ -335,7 +336,7 @@ SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
SDB->clear(); SDB->clear();
// Final step, emit the lowered DAG as machine code. // Final step, emit the lowered DAG as machine code.
CodeGenAndEmitDAG(); return CodeGenAndEmitDAG(BB);
} }
namespace { namespace {
@ -424,7 +425,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {
} while (!Worklist.empty()); } while (!Worklist.empty());
} }
void SelectionDAGISel::CodeGenAndEmitDAG() { MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) {
std::string GroupName; std::string GroupName;
if (TimePassesIsEnabled) if (TimePassesIsEnabled)
GroupName = "Instruction Selection and Scheduling"; GroupName = "Instruction Selection and Scheduling";
@ -433,7 +434,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs || ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
ViewSUnitDAGs) ViewSUnitDAGs)
BlockName = MF->getFunction()->getNameStr() + ":" + BlockName = MF->getFunction()->getNameStr() + ":" +
FuncInfo->MBB->getBasicBlock()->getNameStr(); BB->getBasicBlock()->getNameStr();
DEBUG(dbgs() << "Initial selection DAG:\n"; CurDAG->dump()); DEBUG(dbgs() << "Initial selection DAG:\n"; CurDAG->dump());
@ -540,7 +541,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
{ {
NamedRegionTimer T("Instruction Scheduling", GroupName, NamedRegionTimer T("Instruction Scheduling", GroupName,
TimePassesIsEnabled); TimePassesIsEnabled);
Scheduler->Run(CurDAG, FuncInfo->MBB, FuncInfo->InsertPt); Scheduler->Run(CurDAG, BB, BB->end());
} }
if (ViewSUnitDAGs) Scheduler->viewGraph(); if (ViewSUnitDAGs) Scheduler->viewGraph();
@ -549,8 +550,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
// inserted into. // inserted into.
{ {
NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled); NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled);
FuncInfo->MBB = Scheduler->EmitSchedule(); BB = Scheduler->EmitSchedule();
FuncInfo->InsertPt = Scheduler->InsertPos;
} }
// Free the scheduler state. // Free the scheduler state.
@ -562,6 +562,8 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
// Free the SelectionDAG state, now that we're finished with it. // Free the SelectionDAG state, now that we're finished with it.
CurDAG->clear(); CurDAG->clear();
return BB;
} }
void SelectionDAGISel::DoInstructionSelection() { void SelectionDAGISel::DoInstructionSelection() {
@ -623,22 +625,21 @@ void SelectionDAGISel::DoInstructionSelection() {
/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and /// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
/// do other setup for EH landing-pad blocks. /// do other setup for EH landing-pad blocks.
void SelectionDAGISel::PrepareEHLandingPad() { void SelectionDAGISel::PrepareEHLandingPad(MachineBasicBlock *BB) {
// Add a label to mark the beginning of the landing pad. Deletion of the // Add a label to mark the beginning of the landing pad. Deletion of the
// landing pad can thus be detected via the MachineModuleInfo. // landing pad can thus be detected via the MachineModuleInfo.
MCSymbol *Label = MF->getMMI().addLandingPad(FuncInfo->MBB); MCSymbol *Label = MF->getMMI().addLandingPad(BB);
const TargetInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL); const TargetInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
BuildMI(*FuncInfo->MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II) BuildMI(BB, SDB->getCurDebugLoc(), II).addSym(Label);
.addSym(Label);
// Mark exception register as live in. // Mark exception register as live in.
unsigned Reg = TLI.getExceptionAddressRegister(); unsigned Reg = TLI.getExceptionAddressRegister();
if (Reg) FuncInfo->MBB->addLiveIn(Reg); if (Reg) BB->addLiveIn(Reg);
// Mark exception selector register as live in. // Mark exception selector register as live in.
Reg = TLI.getExceptionSelectorRegister(); Reg = TLI.getExceptionSelectorRegister();
if (Reg) FuncInfo->MBB->addLiveIn(Reg); if (Reg) BB->addLiveIn(Reg);
// FIXME: Hack around an exception handling flaw (PR1508): the personality // FIXME: Hack around an exception handling flaw (PR1508): the personality
// function and list of typeids logically belong to the invoke (or, if you // function and list of typeids logically belong to the invoke (or, if you
@ -651,7 +652,7 @@ void SelectionDAGISel::PrepareEHLandingPad() {
// in exceptions not being caught because no typeids are associated with // in exceptions not being caught because no typeids are associated with
// the invoke. This may not be the only way things can go wrong, but it // the invoke. This may not be the only way things can go wrong, but it
// is the only way we try to work around for the moment. // is the only way we try to work around for the moment.
const BasicBlock *LLVMBB = FuncInfo->MBB->getBasicBlock(); const BasicBlock *LLVMBB = BB->getBasicBlock();
const BranchInst *Br = dyn_cast<BranchInst>(LLVMBB->getTerminator()); const BranchInst *Br = dyn_cast<BranchInst>(LLVMBB->getTerminator());
if (Br && Br->isUnconditional()) { // Critical edge? if (Br && Br->isUnconditional()) { // Critical edge?
@ -675,73 +676,80 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Iterate over all basic blocks in the function. // Iterate over all basic blocks in the function.
for (Function::const_iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) { for (Function::const_iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
const BasicBlock *LLVMBB = &*I; const BasicBlock *LLVMBB = &*I;
FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB]; MachineBasicBlock *BB = FuncInfo->MBBMap[LLVMBB];
FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
BasicBlock::const_iterator const Begin = LLVMBB->getFirstNonPHI(); BasicBlock::const_iterator const Begin = LLVMBB->getFirstNonPHI();
BasicBlock::const_iterator const End = LLVMBB->end(); BasicBlock::const_iterator const End = LLVMBB->end();
BasicBlock::const_iterator BI = End; BasicBlock::const_iterator BI = Begin;
// Lower any arguments needed in this block if this is the entry block.
if (LLVMBB == &Fn.getEntryBlock())
LowerArguments(LLVMBB);
// Setup an EH landing-pad block.
if (BB->isLandingPad())
PrepareEHLandingPad(BB);
// Before doing SelectionDAG ISel, see if FastISel has been requested. // Before doing SelectionDAG ISel, see if FastISel has been requested.
if (FastIS) { if (FastIS) {
FastIS->startNewBlock(); // Emit code for any incoming arguments. This must happen before
// beginning FastISel on the entry block.
if (LLVMBB == &Fn.getEntryBlock()) {
CurDAG->setRoot(SDB->getControlRoot());
SDB->clear();
BB = CodeGenAndEmitDAG(BB);
}
FastIS->startNewBlock(BB);
// Do FastISel on as many instructions as possible. // Do FastISel on as many instructions as possible.
for (; BI != Begin; --BI) { for (; BI != End; ++BI) {
const Instruction *Inst = llvm::prior(BI); #if 0
// Defer instructions with no side effects; they'll be emitted
// If we no longer require this instruction, skip it. // on-demand later.
if (!Inst->mayWriteToMemory() && if (BI->isSafeToSpeculativelyExecute() &&
!isa<TerminatorInst>(Inst) && !FuncInfo->isExportedInst(BI))
!isa<DbgInfoIntrinsic>(Inst) &&
!FuncInfo->isExportedInst(Inst))
continue; continue;
#endif
// Bottom-up: reset the insert pos at the top, after any local-value
// instructions.
MachineBasicBlock::iterator LVIP = FastIS->getLastLocalValue();
if (LVIP != FuncInfo->MBB->end())
FuncInfo->InsertPt = next(LVIP);
else
FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
// Try to select the instruction with FastISel. // Try to select the instruction with FastISel.
if (FastIS->SelectInstruction(Inst)) if (FastIS->SelectInstruction(BI))
continue; continue;
// Then handle certain instructions as single-LLVM-Instruction blocks. // Then handle certain instructions as single-LLVM-Instruction blocks.
if (isa<CallInst>(Inst)) { if (isa<CallInst>(BI)) {
++NumFastIselFailures; ++NumFastIselFailures;
if (EnableFastISelVerbose || EnableFastISelAbort) { if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel missed call: "; dbgs() << "FastISel missed call: ";
Inst->dump(); BI->dump();
} }
if (!Inst->getType()->isVoidTy() && !Inst->use_empty()) { if (!BI->getType()->isVoidTy() && !BI->use_empty()) {
unsigned &R = FuncInfo->ValueMap[Inst]; unsigned &R = FuncInfo->ValueMap[BI];
if (!R) if (!R)
R = FuncInfo->CreateRegs(Inst->getType()); R = FuncInfo->CreateRegs(BI->getType());
} }
bool HadTailCall = false; bool HadTailCall = false;
SelectBasicBlock(Inst, BI, HadTailCall); BB = SelectBasicBlock(BB, BI, llvm::next(BI), HadTailCall);
// If the call was emitted as a tail call, we're done with the block. // If the call was emitted as a tail call, we're done with the block.
if (HadTailCall) { if (HadTailCall) {
--BI; BI = End;
break; break;
} }
// If the instruction was codegen'd with multiple blocks,
// inform the FastISel object where to resume inserting.
FastIS->setCurrentBlock(BB);
continue; continue;
} }
// Otherwise, give up on FastISel for the rest of the block. // Otherwise, give up on FastISel for the rest of the block.
// For now, be a little lenient about non-branch terminators. // For now, be a little lenient about non-branch terminators.
if (!isa<TerminatorInst>(Inst) || isa<BranchInst>(Inst)) { if (!isa<TerminatorInst>(BI) || isa<BranchInst>(BI)) {
++NumFastIselFailures; ++NumFastIselFailures;
if (EnableFastISelVerbose || EnableFastISelAbort) { if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel miss: "; dbgs() << "FastISel miss: ";
Inst->dump(); BI->dump();
} }
if (EnableFastISelAbort) if (EnableFastISelAbort)
// The "fast" selector couldn't handle something and bailed. // The "fast" selector couldn't handle something and bailed.
@ -752,23 +760,15 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
} }
} }
FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
// Setup an EH landing-pad block.
if (FuncInfo->MBB->isLandingPad())
PrepareEHLandingPad();
// Lower any arguments needed in this block if this is the entry block.
if (LLVMBB == &Fn.getEntryBlock())
LowerArguments(LLVMBB);
// Run SelectionDAG instruction selection on the remainder of the block // Run SelectionDAG instruction selection on the remainder of the block
// not handled by FastISel. If FastISel is not run, this is the entire // not handled by FastISel. If FastISel is not run, this is the entire
// block. // block.
bool HadTailCall; if (BI != End) {
SelectBasicBlock(Begin, BI, HadTailCall); bool HadTailCall;
BB = SelectBasicBlock(BB, BI, End, HadTailCall);
}
FinishBasicBlock(); FinishBasicBlock(BB);
FuncInfo->PHINodesToUpdate.clear(); FuncInfo->PHINodesToUpdate.clear();
} }
@ -776,7 +776,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
} }
void void
SelectionDAGISel::FinishBasicBlock() { SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
DEBUG(dbgs() << "Total amount of phi nodes to update: " DEBUG(dbgs() << "Total amount of phi nodes to update: "
<< FuncInfo->PHINodesToUpdate.size() << "\n"; << FuncInfo->PHINodesToUpdate.size() << "\n";
@ -794,11 +794,11 @@ SelectionDAGISel::FinishBasicBlock() {
MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first; MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
assert(PHI->isPHI() && assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!"); "This is not a machine PHI node that we are updating!");
if (!FuncInfo->MBB->isSuccessor(PHI->getParent())) if (!BB->isSuccessor(PHI->getParent()))
continue; continue;
PHI->addOperand( PHI->addOperand(
MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false)); MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB)); PHI->addOperand(MachineOperand::CreateMBB(BB));
} }
return; return;
} }
@ -807,35 +807,33 @@ SelectionDAGISel::FinishBasicBlock() {
// Lower header first, if it wasn't already lowered // Lower header first, if it wasn't already lowered
if (!SDB->BitTestCases[i].Emitted) { if (!SDB->BitTestCases[i].Emitted) {
// Set the current basic block to the mbb we wish to insert the code into // Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = SDB->BitTestCases[i].Parent; BB = SDB->BitTestCases[i].Parent;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code // Emit the code
SDB->visitBitTestHeader(SDB->BitTestCases[i], FuncInfo->MBB); SDB->visitBitTestHeader(SDB->BitTestCases[i], BB);
CurDAG->setRoot(SDB->getRoot()); CurDAG->setRoot(SDB->getRoot());
SDB->clear(); SDB->clear();
CodeGenAndEmitDAG(); BB = CodeGenAndEmitDAG(BB);
} }
for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) { for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) {
// Set the current basic block to the mbb we wish to insert the code into // Set the current basic block to the mbb we wish to insert the code into
FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB; BB = SDB->BitTestCases[i].Cases[j].ThisBB;
FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code // Emit the code
if (j+1 != ej) if (j+1 != ej)
          SDB->visitBitTestCase(SDB->BitTestCases[i].Cases[j+1].ThisBB,
                                SDB->BitTestCases[i].Reg,
                                SDB->BitTestCases[i].Cases[j],
-                               FuncInfo->MBB);
+                               BB);
        else
          SDB->visitBitTestCase(SDB->BitTestCases[i].Default,
                                SDB->BitTestCases[i].Reg,
                                SDB->BitTestCases[i].Cases[j],
-                               FuncInfo->MBB);
+                               BB);
      CurDAG->setRoot(SDB->getRoot());
      SDB->clear();
-     CodeGenAndEmitDAG();
+     BB = CodeGenAndEmitDAG(BB);
    }
    // Update PHI Nodes
@@ -880,24 +878,22 @@ SelectionDAGISel::FinishBasicBlock() {
    // Lower header first, if it wasn't already lowered
    if (!SDB->JTCases[i].first.Emitted) {
      // Set the current basic block to the mbb we wish to insert the code into
-     FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
-     FuncInfo->InsertPt = FuncInfo->MBB->end();
+     BB = SDB->JTCases[i].first.HeaderBB;
      // Emit the code
      SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
-                               FuncInfo->MBB);
+                               BB);
      CurDAG->setRoot(SDB->getRoot());
      SDB->clear();
-     CodeGenAndEmitDAG();
+     BB = CodeGenAndEmitDAG(BB);
    }
    // Set the current basic block to the mbb we wish to insert the code into
-   FuncInfo->MBB = SDB->JTCases[i].second.MBB;
-   FuncInfo->InsertPt = FuncInfo->MBB->end();
+   BB = SDB->JTCases[i].second.MBB;
    // Emit the code
    SDB->visitJumpTable(SDB->JTCases[i].second);
    CurDAG->setRoot(SDB->getRoot());
    SDB->clear();
-   CodeGenAndEmitDAG();
+   BB = CodeGenAndEmitDAG(BB);
    // Update PHI Nodes
    for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
@@ -915,11 +911,11 @@ SelectionDAGISel::FinishBasicBlock() {
          (MachineOperand::CreateMBB(SDB->JTCases[i].first.HeaderBB));
      }
      // JT BB. Just iterate over successors here
-     if (FuncInfo->MBB->isSuccessor(PHIBB)) {
+     if (BB->isSuccessor(PHIBB)) {
        PHI->addOperand
          (MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
                                     false));
-       PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
+       PHI->addOperand(MachineOperand::CreateMBB(BB));
      }
    }
  }
@@ -931,10 +927,10 @@ SelectionDAGISel::FinishBasicBlock() {
    MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
    assert(PHI->isPHI() &&
           "This is not a machine PHI node that we are updating!");
-   if (FuncInfo->MBB->isSuccessor(PHI->getParent())) {
+   if (BB->isSuccessor(PHI->getParent())) {
      PHI->addOperand(
        MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
-     PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
+     PHI->addOperand(MachineOperand::CreateMBB(BB));
    }
  }
@@ -942,8 +938,7 @@ SelectionDAGISel::FinishBasicBlock() {
  // additional DAGs necessary.
  for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
    // Set the current basic block to the mbb we wish to insert the code into
-   MachineBasicBlock *ThisBB = FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
-   FuncInfo->InsertPt = FuncInfo->MBB->end();
+   MachineBasicBlock *ThisBB = BB = SDB->SwitchCases[i].ThisBB;
    // Determine the unique successors.
    SmallVector<MachineBasicBlock *, 2> Succs;
@@ -953,24 +948,21 @@ SelectionDAGISel::FinishBasicBlock() {
    // Emit the code. Note that this could result in ThisBB being split, so
    // we need to check for updates.
-   SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
+   SDB->visitSwitchCase(SDB->SwitchCases[i], BB);
    CurDAG->setRoot(SDB->getRoot());
    SDB->clear();
-   CodeGenAndEmitDAG();
-   ThisBB = FuncInfo->MBB;
+   ThisBB = CodeGenAndEmitDAG(BB);
    // Handle any PHI nodes in successors of this chunk, as if we were coming
    // from the original BB before switch expansion. Note that PHI nodes can
    // occur multiple times in PHINodesToUpdate. We have to be very careful to
    // handle them the right number of times.
    for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
-     FuncInfo->MBB = Succs[i];
-     FuncInfo->InsertPt = FuncInfo->MBB->end();
-     // FuncInfo->MBB may have been removed from the CFG if a branch was
-     // constant folded.
-     if (ThisBB->isSuccessor(FuncInfo->MBB)) {
-       for (MachineBasicBlock::iterator Phi = FuncInfo->MBB->begin();
-            Phi != FuncInfo->MBB->end() && Phi->isPHI();
+     BB = Succs[i];
+     // BB may have been removed from the CFG if a branch was constant folded.
+     if (ThisBB->isSuccessor(BB)) {
+       for (MachineBasicBlock::iterator Phi = BB->begin();
+            Phi != BB->end() && Phi->isPHI();
            ++Phi) {
        // This value for this PHI node is recorded in PHINodesToUpdate.
        for (unsigned pn = 0; ; ++pn) {
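All three FinishBasicBlock hunks above make the same swap: the current block goes back to being a local BB returned by CodeGenAndEmitDAG, rather than being tracked in FuncInfo->MBB with an explicit FuncInfo->InsertPt. The PHI bookkeeping those loops keep repeating is compact enough to show in isolation. The sketch below is illustrative only, not part of the patch; the helper name AddPHIOperandForBlock is invented, and it uses the same 2010-era MachineOperand API as the code above.

#include <cassert>
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Sketch: a machine PHI node takes (register, predecessor-block) operand
// pairs, so recording "Reg flows in from BB" is two addOperand calls.
static void AddPHIOperandForBlock(MachineInstr *PHI, unsigned Reg,
                                  MachineBasicBlock *BB) {
  assert(PHI->isPHI() && "This is not a machine PHI node that we are updating!");
  PHI->addOperand(MachineOperand::CreateReg(Reg, /*isDef=*/false));
  PHI->addOperand(MachineOperand::CreateMBB(BB));
}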
Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -20,7 +20,6 @@
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/GlobalVariable.h"
 #include "llvm/DerivedTypes.h"
-#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineJumpTableInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
@@ -839,65 +838,6 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
   return 1;
 }
-/// Get the EVTs and ArgFlags collections that represent the legalized return
-/// type of the given function. This does not require a DAG or a return value,
-/// and is suitable for use before any DAGs for the function are constructed.
-/// TODO: Move this out of TargetLowering.cpp.
-void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
-                         SmallVectorImpl<ISD::OutputArg> &Outs,
-                         const TargetLowering &TLI,
-                         SmallVectorImpl<uint64_t> *Offsets) {
-  SmallVector<EVT, 4> ValueVTs;
-  ComputeValueVTs(TLI, ReturnType, ValueVTs);
-  unsigned NumValues = ValueVTs.size();
-  if (NumValues == 0) return;
-  unsigned Offset = 0;
-  for (unsigned j = 0, f = NumValues; j != f; ++j) {
-    EVT VT = ValueVTs[j];
-    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
-    if (attr & Attribute::SExt)
-      ExtendKind = ISD::SIGN_EXTEND;
-    else if (attr & Attribute::ZExt)
-      ExtendKind = ISD::ZERO_EXTEND;
-    // FIXME: C calling convention requires the return type to be promoted to
-    // at least 32-bit. But this is not necessary for non-C calling
-    // conventions. The frontend should mark functions whose return values
-    // require promoting with signext or zeroext attributes.
-    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
-      EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
-      if (VT.bitsLT(MinVT))
-        VT = MinVT;
-    }
-    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
-    EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
-    unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
-                        PartVT.getTypeForEVT(ReturnType->getContext()));
-    // 'inreg' on function refers to return value
-    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
-    if (attr & Attribute::InReg)
-      Flags.setInReg();
-    // Propagate extension type if any
-    if (attr & Attribute::SExt)
-      Flags.setSExt();
-    else if (attr & Attribute::ZExt)
-      Flags.setZExt();
-    for (unsigned i = 0; i < NumParts; ++i) {
-      Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true));
-      if (Offsets) {
-        Offsets->push_back(Offset);
-        Offset += PartSize;
-      }
-    }
-  }
-}
 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
 /// function arguments in the caller parameter area. This is the actual
 /// alignment, not its logarithm.
Index: lib/Target/X86/X86FastISel.cpp
@@ -23,7 +23,6 @@
 #include "llvm/GlobalVariable.h"
 #include "llvm/Instructions.h"
 #include "llvm/IntrinsicInst.h"
-#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
@@ -85,8 +84,6 @@ private:
   bool X86SelectStore(const Instruction *I);
-  bool X86SelectRet(const Instruction *I);
   bool X86SelectCmp(const Instruction *I);
   bool X86SelectZExt(const Instruction *I);
@@ -108,7 +105,6 @@ private:
   bool X86SelectCall(const Instruction *I);
   CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isTailCall = false);
-  CCAssignFn *CCAssignFnForRet(CallingConv::ID CC, bool isTailCall = false);
   const X86InstrInfo *getInstrInfo() const {
     return getTargetMachine()->getInstrInfo();
@@ -182,20 +178,6 @@ CCAssignFn *X86FastISel::CCAssignFnForCall(CallingConv::ID CC,
   return CC_X86_32_C;
 }
-/// CCAssignFnForRet - Selects the correct CCAssignFn for a given calling
-/// convention.
-CCAssignFn *X86FastISel::CCAssignFnForRet(CallingConv::ID CC,
-                                          bool isTaillCall) {
-  if (Subtarget->is64Bit()) {
-    if (Subtarget->isTargetWin64())
-      return RetCC_X86_Win64_C;
-    else
-      return RetCC_X86_64_C;
-  }
-  return RetCC_X86_32_C;
-}
 /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
 /// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
 /// Return true and the result register by reference if it is possible.
@@ -248,8 +230,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
   }
   ResultReg = createResultReg(RC);
-  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                         DL, TII.get(Opc), ResultReg), AM);
+  addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
   return true;
 }
@@ -268,7 +249,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
   case MVT::i1: {
     // Mask out all but lowest bit.
     unsigned AndResult = createResultReg(X86::GR8RegisterClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+    BuildMI(MBB, DL,
             TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
     Val = AndResult;
   }
@@ -285,8 +266,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
     break;
   }
-  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                         DL, TII.get(Opc)), AM).addReg(Val);
+  addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val);
   return true;
 }
@@ -314,8 +294,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
   }
   if (Opc) {
-    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                           DL, TII.get(Opc)), AM)
+    addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM)
       .addImm(Signed ? (uint64_t) CI->getSExtValue() :
                        CI->getZExtValue());
     return true;
@@ -354,7 +333,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
     // Don't walk into other basic blocks; it's possible we haven't
     // visited them yet, so the instructions may not yet be assigned
     // virtual registers.
-    if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
+    if (FuncInfo.MBBMap[I->getParent()] != MBB)
       return false;
     Opcode = I->getOpcode();
@@ -551,8 +530,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
       }
       LoadReg = createResultReg(RC);
-      addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
-                             DL, TII.get(Opc), LoadReg), StubAM);
+      addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM);
       // Prevent loading GV stub multiple times in same MBB.
       LocalValueMap[V] = LoadReg;
@@ -678,72 +656,6 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
   return X86FastEmitStore(VT, I->getOperand(0), AM);
 }
-/// X86SelectRet - Select and emit code to implement ret instructions.
-bool X86FastISel::X86SelectRet(const Instruction *I) {
-  const ReturnInst *Ret = cast<ReturnInst>(I);
-  const Function &F = *I->getParent()->getParent();
-  if (!FuncInfo.CanLowerReturn)
-    return false;
-  CallingConv::ID CC = F.getCallingConv();
-  if (CC != CallingConv::C &&
-      CC != CallingConv::Fast &&
-      CC != CallingConv::X86_FastCall)
-    return false;
-  if (Subtarget->isTargetWin64())
-    return false;
-  // fastcc with -tailcallopt is intended to provide a guaranteed
-  // tail call optimization. Fastisel doesn't know how to do that.
-  if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
-    return false;
-  // Let SDISel handle vararg functions.
-  if (F.isVarArg())
-    return false;
-  if (Ret->getNumOperands() > 0) {
-    SmallVector<ISD::OutputArg, 4> Outs;
-    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
-                  Outs, TLI);
-    // Analyze operands of the call, assigning locations to each operand.
-    SmallVector<CCValAssign, 16> ValLocs;
-    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
-    CCInfo.AnalyzeReturn(Outs, CCAssignFnForRet(CC));
-    const Value *RV = Ret->getOperand(0);
-    unsigned Reg = getRegForValue(RV);
-    if (Reg == 0)
-      return false;
-    // Copy the return value into registers.
-    for (unsigned i = 0, e = ValLocs.size(); i != e; ++i) {
-      CCValAssign &VA = ValLocs[i];
-      // Don't bother handling odd stuff for now.
-      if (VA.getLocInfo() != CCValAssign::Full)
-        return false;
-      if (!VA.isRegLoc())
-        return false;
-      TargetRegisterClass* RC = TLI.getRegClassFor(VA.getValVT());
-      bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                      VA.getLocReg(), Reg + VA.getValNo(),
-                                      RC, RC, DL);
-      assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
-      MRI.addLiveOut(VA.getLocReg());
-    }
-  }
-  // Now emit the RET.
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
-  return true;
-}
 /// X86SelectLoad - Select and emit code to implement load instructions.
 ///
 bool X86FastISel::X86SelectLoad(const Instruction *I) {
@@ -808,9 +720,8 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
   // CMPri, otherwise use CMPrr.
   if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
     if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc))
-        .addReg(Op0Reg)
-        .addImm(Op1C->getSExtValue());
+      BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
+                                              .addImm(Op1C->getSExtValue());
       return true;
     }
   }
@@ -820,9 +731,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
   unsigned Op1Reg = getRegForValue(Op1);
   if (Op1Reg == 0) return false;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
-    .addReg(Op0Reg)
-    .addReg(Op1Reg);
+  BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);
   return true;
 }
@@ -844,10 +753,9 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
     unsigned EReg = createResultReg(&X86::GR8RegClass);
     unsigned NPReg = createResultReg(&X86::GR8RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::SETNPr), NPReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+    BuildMI(MBB, DL, TII.get(X86::SETEr), EReg);
+    BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg);
+    BuildMI(MBB, DL,
             TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
     UpdateValueMap(I, ResultReg);
     return true;
@@ -858,13 +766,9 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
     unsigned NEReg = createResultReg(&X86::GR8RegClass);
     unsigned PReg = createResultReg(&X86::GR8RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::SETNEr), NEReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::SETPr), PReg);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-            TII.get(X86::OR8rr), ResultReg)
-      .addReg(PReg).addReg(NEReg);
+    BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg);
+    BuildMI(MBB, DL, TII.get(X86::SETPr), PReg);
+    BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
     UpdateValueMap(I, ResultReg);
     return true;
   }
@@ -903,7 +807,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
   if (!X86FastEmitCompare(Op0, Op1, VT))
     return false;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
+  BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg);
   UpdateValueMap(I, ResultReg);
   return true;
 }
@@ -939,7 +843,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
       // Try to take advantage of fallthrough opportunities.
       CmpInst::Predicate Predicate = CI->getPredicate();
-      if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
+      if (MBB->isLayoutSuccessor(TrueMBB)) {
        std::swap(TrueMBB, FalseMBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }
@@ -988,18 +892,16 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
      if (!X86FastEmitCompare(Op0, Op1, VT))
        return false;
-     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
-       .addMBB(TrueMBB);
+     BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB);
      if (Predicate == CmpInst::FCMP_UNE) {
        // X86 requires a second branch to handle UNE (and OEQ,
        // which is mapped to UNE above).
-       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4))
-         .addMBB(TrueMBB);
+       BuildMI(MBB, DL, TII.get(X86::JP_4)).addMBB(TrueMBB);
      }
      FastEmitBranch(FalseMBB, DL);
-     FuncInfo.MBB->addSuccessor(TrueMBB);
+     MBB->addSuccessor(TrueMBB);
      return true;
    }
  } else if (ExtractValueInst *EI =
@@ -1025,8 +927,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
      unsigned Reg = getRegForValue(EI);
      for (MachineBasicBlock::const_reverse_iterator
-            RI = FuncInfo.MBB->rbegin(), RE = FuncInfo.MBB->rend();
-          RI != RE; ++RI) {
+            RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
        const MachineInstr &MI = *RI;
        if (MI.definesRegister(Reg)) {
@@ -1051,11 +952,11 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
          unsigned OpCode = SetMI->getOpcode();
          if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
-           BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                   TII.get(OpCode == X86::SETOr ? X86::JO_4 : X86::JB_4))
+           BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ?
+                                    X86::JO_4 : X86::JB_4))
              .addMBB(TrueMBB);
            FastEmitBranch(FalseMBB, DL);
-           FuncInfo.MBB->addSuccessor(TrueMBB);
+           MBB->addSuccessor(TrueMBB);
            return true;
          }
        }
@@ -1067,12 +968,10 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
  unsigned OpReg = getRegForValue(BI->getCondition());
  if (OpReg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
-   .addReg(OpReg).addReg(OpReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4))
-   .addMBB(TrueMBB);
+ BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
+ BuildMI(MBB, DL, TII.get(X86::JNE_4)).addMBB(TrueMBB);
  FastEmitBranch(FalseMBB, DL);
- FuncInfo.MBB->addSuccessor(TrueMBB);
+ MBB->addSuccessor(TrueMBB);
  return true;
 }
@@ -1129,7 +1028,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
  // Fold immediate in shl(x,3).
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = createResultReg(RC);
-   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
+   BuildMI(MBB, DL, TII.get(OpImm),
            ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
    UpdateValueMap(I, ResultReg);
    return true;
@@ -1137,20 +1036,17 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;
- TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                  CReg, Op1Reg, RC, RC, DL);
+ TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC, DL);
  // The shift instruction uses X86::CL. If we defined a super-register
  // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
  // we're doing here.
  if (CReg != X86::CL)
-   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-           TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL)
+   BuildMI(MBB, DL, TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL)
      .addReg(CReg).addImm(X86::sub_8bit);
  unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg)
-   .addReg(Op0Reg);
+ BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
  UpdateValueMap(I, ResultReg);
  return true;
 }
@@ -1182,11 +1078,9 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
  unsigned Op2Reg = getRegForValue(I->getOperand(2));
  if (Op2Reg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
-   .addReg(Op0Reg).addReg(Op0Reg);
+ BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
  unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
-   .addReg(Op1Reg).addReg(Op2Reg);
+ BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
  UpdateValueMap(I, ResultReg);
  return true;
 }
@@ -1200,9 +1094,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
      unsigned OpReg = getRegForValue(V);
      if (OpReg == 0) return false;
      unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
-     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-             TII.get(X86::CVTSS2SDrr), ResultReg)
-       .addReg(OpReg);
+     BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
      UpdateValueMap(I, ResultReg);
      return true;
 }
@@ -1219,9 +1111,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
      unsigned OpReg = getRegForValue(V);
      if (OpReg == 0) return false;
      unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
-     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-             TII.get(X86::CVTSD2SSrr), ResultReg)
-       .addReg(OpReg);
+     BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
      UpdateValueMap(I, ResultReg);
      return true;
 }
@@ -1256,8 +1146,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
  const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
    ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
  unsigned CopyReg = createResultReg(CopyRC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CopyOpc), CopyReg)
-   .addReg(InputReg);
+ BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);
  // Then issue an extract_subreg.
  unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
@@ -1278,18 +1167,14 @@ bool X86FastISel::X86SelectExtractValue(const Instruction *I) {
    switch (CI->getIntrinsicID()) {
    default: break;
    case Intrinsic::sadd_with_overflow:
-   case Intrinsic::uadd_with_overflow: {
+   case Intrinsic::uadd_with_overflow:
      // Cheat a little. We know that the registers for "add" and "seto" are
      // allocated sequentially. However, we only keep track of the register
      // for "add" in the value map. Use extractvalue's index to get the
      // correct register for "seto".
-     unsigned OpReg = getRegForValue(Agg);
-     if (OpReg == 0)
-       return false;
-     UpdateValueMap(I, OpReg + *EI->idx_begin());
+     UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
      return true;
    }
-   }
  }
  return false;
@@ -1333,7 +1218,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
      return false;
    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
-   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg).
+   BuildMI(MBB, DL, TII.get(OpC), ResultReg).
      addImm(CI->isZero() ? -1ULL : 0);
    UpdateValueMap(&I, ResultReg);
    return true;
@@ -1347,12 +1232,12 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    // FIXME may need to add RegState::Debug to any registers produced,
    // although ESP/EBP should be the only ones at the moment.
-   addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM).
-     addImm(0).addMetadata(DI->getVariable());
+   addFullAddress(BuildMI(MBB, DL, II), AM).addImm(0).
+     addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::trap: {
-   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP));
+   BuildMI(MBB, DL, TII.get(X86::TRAP));
    return true;
  }
  case Intrinsic::sadd_with_overflow:
@@ -1388,8 +1273,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
      return false;
    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
-   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
-     .addReg(Reg1).addReg(Reg2);
+   BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
    unsigned DestReg1 = UpdateValueMap(&I, ResultReg);
    // If the add with overflow is an intra-block value then we just want to
@@ -1407,7 +1291,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
    unsigned Opc = X86::SETBr;
    if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
      Opc = X86::SETOr;
-   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg);
+   BuildMI(MBB, DL, TII.get(Opc), ResultReg);
    return true;
  }
 }
@@ -1534,8 +1418,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
  // Issue CALLSEQ_START
  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))
-   .addImm(NumBytes);
+ BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes);
  // Process argument: walk the register/memloc assignments, inserting
  // copies / loads.
@@ -1591,8 +1474,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
    if (VA.isRegLoc()) {
      TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
-     bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                     VA.getLocReg(), Arg, RC, RC, DL);
+     bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
+                                     Arg, RC, RC, DL);
      assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
      Emitted = true;
      RegArgs.push_back(VA.getLocReg());
@@ -1618,8 +1501,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
  if (Subtarget->isPICStyleGOT()) {
    TargetRegisterClass *RC = X86::GR32RegisterClass;
    unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
-   bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
-                                   X86::EBX, Base, RC, RC, DL);
+   bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC,
+                                   DL);
    assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
    Emitted = true;
  }
@@ -1629,8 +1512,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
  if (CalleeOp) {
    // Register-indirect call.
    unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r;
-   MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
-     .addReg(CalleeOp);
+   MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp);
  } else {
    // Direct call.
@@ -1659,8 +1541,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
    }
-   MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
-     .addGlobalAddress(GV, 0, OpFlags);
+   MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags);
  }
  // Add an implicit use GOT pointer in EBX.
@@ -1673,8 +1554,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
-   .addImm(NumBytes).addImm(0);
+ BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
  // Now handle call return value (if any).
  SmallVector<unsigned, 4> UsedRegs;
@@ -1701,7 +1581,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
    }
    unsigned ResultReg = createResultReg(DstRC);
-   bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, ResultReg,
+   bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                    RVLocs[0].getLocReg(), DstRC, SrcRC, DL);
    assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
    Emitted = true;
@@ -1715,21 +1595,18 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
      unsigned MemSize = ResVT.getSizeInBits()/8;
      int FI = MFI.CreateStackObject(MemSize, MemSize, false);
-     addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                               TII.get(Opc)), FI)
-       .addReg(ResultReg);
+     addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
      DstRC = ResVT == MVT::f32
        ? X86::FR32RegisterClass : X86::FR64RegisterClass;
      Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
      ResultReg = createResultReg(DstRC);
-     addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                               TII.get(Opc), ResultReg), FI);
+     addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
    }
    if (AndToI1) {
      // Mask out all but lowest bit for some call which produces an i1.
      unsigned AndResult = createResultReg(X86::GR8RegisterClass);
-     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+     BuildMI(MBB, DL,
             TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
      ResultReg = AndResult;
    }
@@ -1752,8 +1629,6 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
    return X86SelectLoad(I);
  case Instruction::Store:
    return X86SelectStore(I);
- case Instruction::Ret:
-   return X86SelectRet(I);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return X86SelectCmp(I);
@@ -1854,8 +1729,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
    else
      Opc = X86::LEA64r;
    unsigned ResultReg = createResultReg(RC);
-   addLeaAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                         TII.get(Opc), ResultReg), AM);
+   addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
    return ResultReg;
  }
  return 0;
@@ -1885,8 +1759,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
  // Create the load from the constant pool.
  unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
  unsigned ResultReg = createResultReg(RC);
- addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                                  TII.get(Opc), ResultReg),
+ addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg),
                           MCPOffset, PICBase, OpFlag);
  return ResultReg;
@@ -1909,8 +1782,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
  unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
  unsigned ResultReg = createResultReg(RC);
- addLeaAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
-                       TII.get(Opc), ResultReg), AM);
+ addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
  return ResultReg;
 }
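Every hunk in this file makes the same substitution: the insertion-point form BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, ...) goes back to the block-pointer form BuildMI(MBB, DL, ...), which simply appends at the end of the current block. Below is a minimal sketch of the restored idiom, not code from this commit; the function and variable names are invented, and the X86 opcode and register-class names assume the target's generated headers are available.

#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// Sketch only: materialize a SETO into a fresh GR8 virtual register at
// the end of MBB, the way the post-revert FastISel code emits everything.
static unsigned EmitSetO(MachineBasicBlock *MBB, DebugLoc DL,
                         const TargetInstrInfo &TII,
                         MachineRegisterInfo &MRI) {
  unsigned ResultReg = MRI.createVirtualRegister(&X86::GR8RegClass);
  BuildMI(MBB, DL, TII.get(X86::SETOr), ResultReg); // appended at MBB->end()
  return ResultReg;
}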
Index: lib/Target/X86/X86ISelLowering.cpp
@@ -1218,12 +1218,13 @@ bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
 bool
 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
+                                  const SmallVectorImpl<EVT> &OutTys,
+                                  const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                                   LLVMContext &Context) const {
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                  RVLocs, Context);
-  return CCInfo.CheckReturn(Outs, RetCC_X86);
+  return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_X86);
 }
 SDValue
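X86 here and XCore below restore an identical pattern, so the reverted interface for any target reduces to the following boilerplate. MyTargetLowering and RetCC_MyTarget are placeholders, not names from this commit; RetCC_MyTarget stands for the TableGen-generated return calling-convention function.

// Placeholder sketch of the restored two-vector CheckReturn interface.
bool MyTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<EVT> &OutTys,
    const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
    LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_MyTarget);
}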
Index: lib/Target/X86/X86ISelLowering.h
@@ -740,7 +740,8 @@ namespace llvm {
     virtual bool
     CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-                   const SmallVectorImpl<ISD::OutputArg> &Outs,
+                   const SmallVectorImpl<EVT> &OutTys,
+                   const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                    LLVMContext &Context) const;
     void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
Index: lib/Target/XCore/XCoreISelLowering.cpp
@@ -1135,12 +1135,13 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
 bool XCoreTargetLowering::
 CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-               const SmallVectorImpl<ISD::OutputArg> &Outs,
+               const SmallVectorImpl<EVT> &OutTys,
+               const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                LLVMContext &Context) const {
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                  RVLocs, Context);
-  return CCInfo.CheckReturn(Outs, RetCC_XCore);
+  return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_XCore);
 }
 SDValue
Index: lib/Target/XCore/XCoreISelLowering.h
@@ -193,7 +193,8 @@ namespace llvm {
     virtual bool
     CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
-                   const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+                   const SmallVectorImpl<EVT> &OutTys,
+                   const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
                    LLVMContext &Context) const;
   };
 }
@@ -14,7 +14,8 @@ define i32 @test1(i32 %t3, i32* %t1) nounwind {
 ; X64: test1:
 ; X64: movslq %edi, %rax
-; X64: movl (%rsi,%rax,4), %e
+; X64: movl (%rsi,%rax,4), %eax
+; X64: ret
 }
 define i32 @test2(i64 %t3, i32* %t1) nounwind {
@@ -5,7 +5,7 @@
 ; CHECK: foo:
 ; CHECK-NEXT: movq %rdi, -8(%rsp)
 ; CHECK-NEXT: movq %rsi, -16(%rsp)
-; CHECK-NEXT: movsd 128(%rsi,%rdi,8), %xmm0
+; CHECK: movsd 128(%rsi,%rdi,8), %xmm0
 ; CHECK-NEXT: ret
 define double @foo(i64 %x, double* %p) nounwind {
@@ -49,10 +49,9 @@ entry:
   ret i32 %tmp2
 }
-define void @ptrtoint_i1(i8* %p, i1* %q) nounwind {
+define i1 @ptrtoint_i1(i8* %p) nounwind {
   %t = ptrtoint i8* %p to i1
-  store i1 %t, i1* %q
-  ret void
+  ret i1 %t
 }
 define i8* @inttoptr_i1(i1 %p) nounwind {
   %t = inttoptr i1 %p to i8*
@@ -87,8 +86,11 @@ define i8 @mul_i8(i8 %a) nounwind {
   ret i8 %tmp
 }
-define void @load_store_i1(i1* %p, i1* %q) nounwind {
-  %t = load i1* %p
-  store i1 %t, i1* %q
+define void @store_i1(i1* %p, i1 %t) nounwind {
+  store i1 %t, i1* %p
   ret void
 }
+define i1 @load_i1(i1* %p) nounwind {
+  %t = load i1* %p
+  ret i1 %t
+}
Index: utils/TableGen/FastISelEmitter.cpp
@@ -432,7 +432,7 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
         for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
           if ((*Memo.PhysRegs)[i] != "")
-            OS << "  TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, "
+            OS << "  TII.copyRegToReg(*MBB, MBB->end(), "
                << (*Memo.PhysRegs)[i] << ", Op" << i << ", "
                << "TM.getRegisterInfo()->getPhysicalRegisterRegClass("
                << (*Memo.PhysRegs)[i] << "), "
@@ -526,7 +526,7 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
         for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
           if ((*Memo.PhysRegs)[i] != "")
-            OS << "  TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, "
+            OS << "  TII.copyRegToReg(*MBB, MBB->end(), "
               << (*Memo.PhysRegs)[i] << ", Op" << i << ", "
               << "TM.getRegisterInfo()->getPhysicalRegisterRegClass("
              << (*Memo.PhysRegs)[i] << "), "
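The TableGen change mirrors the hand-written FastISel hunks: generated physical-register copies go back to the end-of-block form. Pieced together with the complete argument list seen in the X86FastISel hunks above, each emitted call has roughly this shape; CopyToPhysReg, DestPhysReg, SrcReg, and RC are illustrative stand-ins, not names from the generated code.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// Sketch of the copy the generated FastISel code performs.  copyRegToReg
// returns false if no copy could be emitted, which is what the asserts
// sprinkled through X86FastISel above are checking.
static bool CopyToPhysReg(const TargetInstrInfo &TII, MachineBasicBlock *MBB,
                          unsigned DestPhysReg, unsigned SrcReg,
                          const TargetRegisterClass *RC, DebugLoc DL) {
  return TII.copyRegToReg(*MBB, MBB->end(), DestPhysReg, SrcReg, RC, RC, DL);
}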