Define CallSDNode, an SDNode subclass for use with ISD::CALL.

Currently it just holds the calling convention and flags
for isVarArgs and isTailCall.

It also provides several utility methods that eliminate magic
5+2*i and similar index computations in several places.

CallSDNodes are not CSE'd. Teach UpdateNodeOperands to handle
nodes that are not CSE'd gracefully.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@56183 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Dan Gohman
Date: 2008-09-13 01:54:27 +00:00
parent e7de7e3574
commit 095cc29f32
18 changed files with 258 additions and 199 deletions
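A rough sketch of the cleanup the accessors enable; collectCallArgs is a hypothetical helper, while the CallSDNode methods are the ones added in this patch:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Gather the non-byval argument values of a lowered ISD::CALL. Before this
// patch the loop body would have read Op.getOperand(5+2*i) and
// cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags().
static void collectCallArgs(SDValue Op, SmallVectorImpl<SDValue> &Args) {
  CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; ++i) {
    SDValue Arg = TheCall->getArg(i);                // was getOperand(5+2*i)
    ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i); // was getOperand(5+2*i+1)
    if (!Flags.isByVal())
      Args.push_back(Arg);
  }
}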


@ -143,7 +143,7 @@ public:
/// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info
/// about the passed values into this state.
void AnalyzeCallOperands(SDNode *TheCall, CCAssignFn Fn);
void AnalyzeCallOperands(CallSDNode *TheCall, CCAssignFn Fn);
/// AnalyzeCallOperands - Same as above except it takes vectors of types
/// and argument flags.
@ -153,7 +153,7 @@ public:
/// AnalyzeCallResult - Analyze the return values of an ISD::CALL node,
/// incorporating info about the passed values into this state.
void AnalyzeCallResult(SDNode *TheCall, CCAssignFn Fn);
void AnalyzeCallResult(CallSDNode *TheCall, CCAssignFn Fn);
/// AnalyzeCallResult - Same as above except it's specialized for calls which
/// produce a single value.

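A hedged usage sketch of the new CCState overload; countOutgoingStackBytes is a hypothetical helper, and any target CCAssignFn (for example CC_Mips from the Mips changes below) could be passed in:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

// Compute the outgoing-argument stack size for a call. The per-argument
// types and flags now come straight from the CallSDNode rather than from
// hand-computed operand indices.
static unsigned countOutgoingStackBytes(CallSDNode *TheCall,
                                        const TargetMachine &TM,
                                        CCAssignFn Fn) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(TheCall->getCallingConv(), TheCall->isVarArg(), TM, ArgLocs);
  CCInfo.AnalyzeCallOperands(TheCall, Fn);
  return CCInfo.getNextStackOffset(); // bytes of stack needed for the arguments
}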

@ -463,6 +463,11 @@ public:
return getNode(ISD::MERGE_VALUES, VTs, Ops, NumOps);
}
/// getCall - Create a CALL node from the given information.
///
SDValue getCall(unsigned CallingConv, bool IsVarArgs, bool IsTailCall,
SDVTList VTs, const SDValue *Operands, unsigned NumOperands);
/// getLoad - Loads are not normal binary operators: their result type is not
/// determined by their operands, and they produce a value AND a token chain.
///
@ -731,7 +736,7 @@ public:
SDValue getShuffleScalarElt(const SDNode *N, unsigned Idx);
private:
void RemoveNodeFromCSEMaps(SDNode *N);
bool RemoveNodeFromCSEMaps(SDNode *N);
SDNode *AddNonLeafNodeToCSEMaps(SDNode *N);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,

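A minimal sketch of building a CALL through the new getCall hook; emitSimpleCall and its fixed i32 result type are assumptions for illustration, and the operand layout mirrors the LowerCallTo change further down:

#include "llvm/ADT/SmallVector.h"
#include "llvm/CallingConv.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Emit a call with no arguments that returns a single i32. The calling
// convention, vararg, and tail-call bits go to getCall itself instead of
// being encoded as constant operands #1-#3 of the node.
static SDValue emitSimpleCall(SelectionDAG &DAG, SDValue Chain, SDValue Callee) {
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);  // Op#0 - chain
  Ops.push_back(Callee); // Op#1 - callee; arg/flag pairs would follow
  SmallVector<MVT, 4> RetTys;
  RetTys.push_back(MVT::i32);   // the call's result value
  RetTys.push_back(MVT::Other); // a CALL always produces a chain
  return DAG.getCall(CallingConv::C, /*IsVarArgs=*/false, /*IsTailCall=*/false,
                     DAG.getVTList(&RetTys[0], RetTys.size()),
                     &Ops[0], Ops.size());
}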

@ -179,7 +179,7 @@ namespace ISD {
///
FORMAL_ARGUMENTS,
/// RV1, RV2...RVn, CHAIN = CALL(CHAIN, CC#, ISVARARG, ISTAILCALL, CALLEE,
/// RV1, RV2...RVn, CHAIN = CALL(CHAIN, CALLEE,
/// ARG0, FLAG0, ARG1, FLAG1, ... ARGn, FLAGn)
/// This node represents a fully general function call, before the legalizer
/// runs. This has one result value for each argument / flag pair, plus
@ -194,6 +194,11 @@ namespace ISD {
/// Bit 10-26 - size of byval structures
/// Bits 31:27 - argument ABI alignment in the first argument piece and
/// alignment '1' in other argument pieces.
///
/// CALL nodes use the CallSDNode subclass of SDNode, which
/// additionally carries information about the calling convention,
/// whether the call is varargs, and if it's marked as a tail call.
///
CALL,
// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
@ -2181,6 +2186,49 @@ public:
}
};
/// CallSDNode - Node for calls -- ISD::CALL.
class CallSDNode : public SDNode {
unsigned CallingConv;
bool IsVarArg;
bool IsTailCall;
virtual void ANCHOR(); // Out-of-line virtual method to give class a home.
protected:
friend class SelectionDAG;
CallSDNode(unsigned cc, bool isvararg, bool istailcall,
SDVTList VTs, const SDValue *Operands, unsigned numOperands)
: SDNode(ISD::CALL, VTs, Operands, numOperands),
CallingConv(cc), IsVarArg(isvararg), IsTailCall(istailcall) {}
public:
unsigned getCallingConv() const { return CallingConv; }
unsigned isVarArg() const { return IsVarArg; }
unsigned isTailCall() const { return IsTailCall; }
/// Set this call to not be marked as a tail call. Normally setter
/// methods in SDNodes are unsafe because it breaks the CSE map,
/// but we don't CSE calls so it's ok in this case.
void setNotTailCall() { IsTailCall = false; }
SDValue getChain() const { return getOperand(0); }
SDValue getCallee() const { return getOperand(1); }
unsigned getNumArgs() const { return (getNumOperands() - 2) / 2; }
SDValue getArg(unsigned i) const { return getOperand(2+2*i); }
SDValue getArgFlagsVal(unsigned i) const {
return getOperand(3+2*i);
}
ISD::ArgFlagsTy getArgFlags(unsigned i) const {
return cast<ARG_FLAGSSDNode>(getArgFlagsVal(i).getNode())->getArgFlags();
}
unsigned getNumRetVals() const { return getNumValues() - 1; }
MVT getRetValType(unsigned i) const { return getValueType(i); }
static bool classof(const CallSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::CALL;
}
};
/// VTSDNode - This class is used to represent MVT's, which are used
/// to parameterize some operations.
class VTSDNode : public SDNode {

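For illustration, a hypothetical dump helper that reads a call entirely through the new accessors; cerr and getMVTString are the same facilities used elsewhere in this patch:

#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/Streams.h"
using namespace llvm;

// Print a CALL node's arguments and results without any 5+2*i arithmetic.
static void dumpCall(const CallSDNode *TheCall) {
  cerr << "cc=" << TheCall->getCallingConv()
       << (TheCall->isVarArg() ? " varargs" : "")
       << (TheCall->isTailCall() ? " tail" : "") << "\n";
  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; ++i)
    cerr << "  arg #" << i << ": "
         << TheCall->getArg(i).getValueType().getMVTString()
         << (TheCall->getArgFlags(i).isByVal() ? " byval" : "") << "\n";
  for (unsigned i = 0, e = TheCall->getNumRetVals(); i != e; ++i)
    cerr << "  ret #" << i << ": "
         << TheCall->getRetValType(i).getMVTString() << "\n";
}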

@ -1075,7 +1075,7 @@ public:
/// IsEligibleForTailCallOptimization - Check whether the call is eligible for
/// tail call optimization. Targets which want to do tail call optimization
/// should override this function.
virtual bool IsEligibleForTailCallOptimization(SDValue Call,
virtual bool IsEligibleForTailCallOptimization(CallSDNode *Call,
SDValue Ret,
SelectionDAG &DAG) const {
return false;
@ -1085,15 +1085,15 @@ public:
/// precedes the RET node and whether the return uses the result of the node
/// or is a void return. This function can be used by the target to determine
/// eligibility of tail call optimization.
static bool CheckTailCallReturnConstraints(SDValue Call, SDValue Ret) {
static bool CheckTailCallReturnConstraints(CallSDNode *TheCall, SDValue Ret) {
unsigned NumOps = Ret.getNumOperands();
if ((NumOps == 1 &&
(Ret.getOperand(0) == SDValue(Call.getNode(),1) ||
Ret.getOperand(0) == SDValue(Call.getNode(),0))) ||
(Ret.getOperand(0) == SDValue(TheCall,1) ||
Ret.getOperand(0) == SDValue(TheCall,0))) ||
(NumOps > 1 &&
Ret.getOperand(0) == SDValue(Call.getNode(),
Call.getNode()->getNumValues()-1) &&
Ret.getOperand(1) == SDValue(Call.getNode(),0)))
Ret.getOperand(0) == SDValue(TheCall,
TheCall->getNumValues()-1) &&
Ret.getOperand(1) == SDValue(TheCall,0)))
return true;
return false;
}


@ -91,12 +91,11 @@ void CCState::AnalyzeReturn(SDNode *TheRet, CCAssignFn Fn) {
/// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info
/// about the passed values into this state.
void CCState::AnalyzeCallOperands(SDNode *TheCall, CCAssignFn Fn) {
unsigned NumOps = (TheCall->getNumOperands() - 5) / 2;
void CCState::AnalyzeCallOperands(CallSDNode *TheCall, CCAssignFn Fn) {
unsigned NumOps = TheCall->getNumArgs();
for (unsigned i = 0; i != NumOps; ++i) {
MVT ArgVT = TheCall->getOperand(5+2*i).getValueType();
ISD::ArgFlagsTy ArgFlags =
cast<ARG_FLAGSSDNode>(TheCall->getOperand(5+2*i+1))->getArgFlags();
MVT ArgVT = TheCall->getArg(i).getValueType();
ISD::ArgFlagsTy ArgFlags = TheCall->getArgFlags(i);
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
cerr << "Call operand #" << i << " has unhandled type "
<< ArgVT.getMVTString() << "\n";
@ -124,9 +123,9 @@ void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
/// AnalyzeCallResult - Analyze the return values of an ISD::CALL node,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallResult(SDNode *TheCall, CCAssignFn Fn) {
for (unsigned i = 0, e = TheCall->getNumValues() - 1; i != e; ++i) {
MVT VT = TheCall->getValueType(i);
void CCState::AnalyzeCallResult(CallSDNode *TheCall, CCAssignFn Fn) {
for (unsigned i = 0, e = TheCall->getNumRetVals(); i != e; ++i) {
MVT VT = TheCall->getRetValType(i);
if (Fn(i, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
cerr << "Call result #" << i << " has unhandled type "
<< VT.getMVTString() << "\n";


@ -595,13 +595,13 @@ void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
/// correspond to it. This is useful when we're about to delete or repurpose
the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
void SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
bool Erased = false;
switch (N->getOpcode()) {
case ISD::EntryToken:
assert(0 && "EntryToken should not be in CSEMaps!");
return;
case ISD::HANDLENODE: return; // noop.
return false;
case ISD::HANDLENODE: return false; // noop.
case ISD::CONDCODE:
assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
"Cond code doesn't exist!");
@ -635,7 +635,8 @@ void SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
// flag result (which cannot be CSE'd) or is one of the special cases that are
// not subject to CSE.
if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Flag &&
!N->isTargetOpcode() &&
!N->isMachineOpcode() &&
N->getOpcode() != ISD::CALL &&
N->getOpcode() != ISD::DBG_LABEL &&
N->getOpcode() != ISD::DBG_STOPPOINT &&
N->getOpcode() != ISD::EH_LABEL &&
@ -645,6 +646,7 @@ void SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
assert(0 && "Node is not in map!");
}
#endif
return Erased;
}
/// AddNonLeafNodeToCSEMaps - Add the specified node back to the CSE maps. It
@ -660,6 +662,7 @@ SDNode *SelectionDAG::AddNonLeafNodeToCSEMaps(SDNode *N) {
switch (N->getOpcode()) {
default: break;
case ISD::CALL:
case ISD::HANDLENODE:
case ISD::DBG_LABEL:
case ISD::DBG_STOPPOINT:
@ -3303,6 +3306,21 @@ SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
return getNode(ISD::MERGE_VALUES, getVTList(&VTs[0], NumOps), Ops, NumOps);
}
SDValue
SelectionDAG::getCall(unsigned CallingConv, bool IsVarArgs, bool IsTailCall,
SDVTList VTs,
const SDValue *Operands, unsigned NumOperands) {
// Do not CSE calls. Note that in addition to being a compile-time
// optimization (since attempting CSE of calls is unlikely to be
// meaningful), we actually depend on this behavior. CallSDNode can
// be mutated, which is only safe if calls are not CSE'd.
SDNode *N = NodeAllocator.Allocate<CallSDNode>();
new (N) CallSDNode(CallingConv, IsVarArgs, IsTailCall,
VTs, Operands, NumOperands);
AllNodes.push_back(N);
return SDValue(N, 0);
}
SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
MVT VT, SDValue Chain,
@ -3761,7 +3779,8 @@ SDValue SelectionDAG::UpdateNodeOperands(SDValue InN, SDValue Op) {
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
RemoveNodeFromCSEMaps(N);
if (!RemoveNodeFromCSEMaps(N))
InsertPos = 0;
// Now we update the operands.
N->OperandList[0].getVal()->removeUser(0, N);
@ -3790,7 +3809,8 @@ UpdateNodeOperands(SDValue InN, SDValue Op1, SDValue Op2) {
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
RemoveNodeFromCSEMaps(N);
if (!RemoveNodeFromCSEMaps(N))
InsertPos = 0;
// Now we update the operands.
if (N->OperandList[0] != Op1) {
@ -3856,7 +3876,8 @@ UpdateNodeOperands(SDValue InN, const SDValue *Ops, unsigned NumOps) {
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
RemoveNodeFromCSEMaps(N);
if (!RemoveNodeFromCSEMaps(N))
InsertPos = 0;
// Now we update the operands.
for (unsigned i = 0; i != NumOps; ++i) {
@ -4079,7 +4100,8 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
return ON;
}
RemoveNodeFromCSEMaps(N);
if (!RemoveNodeFromCSEMaps(N))
IP = 0;
// Start the morphing.
N->NodeType = Opc;
@ -4582,6 +4604,7 @@ void MemSDNode::ANCHOR() {}
void LoadSDNode::ANCHOR() {}
void StoreSDNode::ANCHOR() {}
void AtomicSDNode::ANCHOR() {}
void CallSDNode::ANCHOR() {}
HandleSDNode::~HandleSDNode() {
DropOperands();


@ -5325,9 +5325,6 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
ArgListTy &Args, SelectionDAG &DAG) {
SmallVector<SDValue, 32> Ops;
Ops.push_back(Chain); // Op#0 - Chain
Ops.push_back(DAG.getConstant(CallingConv, getPointerTy())); // Op#1 - CC
Ops.push_back(DAG.getConstant(isVarArg, getPointerTy())); // Op#2 - VarArg
Ops.push_back(DAG.getConstant(isTailCall, getPointerTy())); // Op#3 - Tail
Ops.push_back(Callee);
// Handle all of the outgoing arguments.
@ -5412,10 +5409,10 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
LoweredRetTys.push_back(MVT::Other); // Always has a chain.
// Create the CALL node.
SDValue Res = DAG.getNode(ISD::CALL,
DAG.getVTList(&LoweredRetTys[0],
LoweredRetTys.size()),
&Ops[0], Ops.size());
SDValue Res = DAG.getCall(CallingConv, isVarArg, isTailCall,
DAG.getVTList(&LoweredRetTys[0],
LoweredRetTys.size()),
&Ops[0], Ops.size());
Chain = Res.getValue(LoweredRetTys.size() - 1);
// Gather up the call result into a single value.


@ -402,56 +402,45 @@ static void CheckDAGForTailCallsAndFixThem(SelectionDAG &DAG,
for (SelectionDAG::allnodes_iterator BE = DAG.allnodes_begin(),
BI = DAG.allnodes_end(); BI != BE; ) {
--BI;
if (BI->getOpcode() == ISD::CALL) {
if (CallSDNode *TheCall = dyn_cast<CallSDNode>(BI)) {
SDValue OpRet(Ret, 0);
SDValue OpCall(BI, 0);
bool isMarkedTailCall =
cast<ConstantSDNode>(OpCall.getOperand(3))->getZExtValue() != 0;
bool isMarkedTailCall = TheCall->isTailCall();
// If CALL node has tail call attribute set to true and the call is not
// eligible (no RET or the target rejects) the attribute is fixed to
// false. The TargetLowering::IsEligibleForTailCallOptimization function
// must correctly identify tail call optimizable calls.
if (!isMarkedTailCall) continue;
if (Ret==NULL ||
!TLI.IsEligibleForTailCallOptimization(OpCall, OpRet, DAG)) {
// Not eligible. Mark CALL node as non tail call.
SmallVector<SDValue, 32> Ops;
unsigned idx=0;
for(SDNode::op_iterator I =OpCall.getNode()->op_begin(),
E = OpCall.getNode()->op_end(); I != E; I++, idx++) {
if (idx!=3)
Ops.push_back(*I);
else
Ops.push_back(DAG.getConstant(false, TLI.getPointerTy()));
}
DAG.UpdateNodeOperands(OpCall, Ops.begin(), Ops.size());
!TLI.IsEligibleForTailCallOptimization(TheCall, OpRet, DAG)) {
// Not eligible. Mark CALL node as non tail call. Note that we
// can modify the call node in place since calls are not CSE'd.
TheCall->setNotTailCall();
} else {
// Look for tail call clobbered arguments. Emit a series of
// copyto/copyfrom virtual register nodes to protect them.
SmallVector<SDValue, 32> Ops;
SDValue Chain = OpCall.getOperand(0), InFlag;
unsigned idx=0;
for(SDNode::op_iterator I = OpCall.getNode()->op_begin(),
E = OpCall.getNode()->op_end(); I != E; I++, idx++) {
SDValue Arg = *I;
if (idx > 4 && (idx % 2)) {
bool isByVal = cast<ARG_FLAGSSDNode>(OpCall.getOperand(idx+1))->
getArgFlags().isByVal();
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
if (!isByVal &&
IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) {
MVT VT = Arg.getValueType();
unsigned VReg = MF.getRegInfo().
createVirtualRegister(TLI.getRegClassFor(VT));
Chain = DAG.getCopyToReg(Chain, VReg, Arg, InFlag);
InFlag = Chain.getValue(1);
Arg = DAG.getCopyFromReg(Chain, VReg, VT, InFlag);
Chain = Arg.getValue(1);
InFlag = Arg.getValue(2);
}
SDValue Chain = TheCall->getChain(), InFlag;
Ops.push_back(Chain);
Ops.push_back(TheCall->getCallee());
for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; ++i) {
SDValue Arg = TheCall->getArg(i);
bool isByVal = TheCall->getArgFlags(i).isByVal();
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
if (!isByVal &&
IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) {
MVT VT = Arg.getValueType();
unsigned VReg = MF.getRegInfo().
createVirtualRegister(TLI.getRegClassFor(VT));
Chain = DAG.getCopyToReg(Chain, VReg, Arg, InFlag);
InFlag = Chain.getValue(1);
Arg = DAG.getCopyFromReg(Chain, VReg, VT, InFlag);
Chain = Arg.getValue(1);
InFlag = Arg.getValue(2);
}
Ops.push_back(Arg);
Ops.push_back(TheCall->getArgFlagsVal(i));
}
// Link in chain of CopyTo/CopyFromReg.
Ops[0] = Chain;


@ -179,6 +179,12 @@ std::string DOTGraphTraits<SelectionDAG*>::getNodeLabel(const SDNode *Node,
Op += ":" + utostr(D->getColumn());
} else if (const LabelSDNode *L = dyn_cast<LabelSDNode>(Node)) {
Op += ": LabelID=" + utostr(L->getLabelID());
} else if (const CallSDNode *C = dyn_cast<CallSDNode>(Node)) {
Op += ": CallingConv=" + utostr(C->getCallingConv());
if (C->isVarArg())
Op += ", isVarArg";
if (C->isTailCall())
Op += ", isTailCall";
} else if (const ExternalSymbolSDNode *ES =
dyn_cast<ExternalSymbolSDNode>(Node)) {
Op += "'" + std::string(ES->getSymbol()) + "'";


@ -410,13 +410,14 @@ HowToPassArgument(MVT ObjectVT, unsigned NumGPRs,
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
MVT RetVT= Op.getNode()->getValueType(0);
SDValue Chain = Op.getOperand(0);
unsigned CallConv = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
MVT RetVT = TheCall->getRetValType(0);
SDValue Chain = TheCall->getChain();
unsigned CallConv = TheCall->getCallingConv();
assert((CallConv == CallingConv::C ||
CallConv == CallingConv::Fast) && "unknown calling convention");
SDValue Callee = Op.getOperand(4);
unsigned NumOps = (Op.getNumOperands() - 5) / 2;
SDValue Callee = TheCall->getCallee();
unsigned NumOps = TheCall->getNumArgs();
unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
unsigned NumGPRs = 0; // GPRs used for parameter passing.
@ -429,9 +430,8 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
unsigned ObjGPRs;
unsigned StackPad;
unsigned GPRPad;
MVT ObjectVT = Op.getOperand(5+2*i).getValueType();
ISD::ArgFlagsTy Flags =
cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags();
MVT ObjectVT = TheCall->getArg(i).getValueType();
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
HowToPassArgument(ObjectVT, NumGPRs, NumBytes, ObjGPRs, ObjSize,
GPRPad, StackPad, Flags);
NumBytes += ObjSize + StackPad;
@ -453,9 +453,8 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
std::vector<std::pair<unsigned, SDValue> > RegsToPass;
std::vector<SDValue> MemOpChains;
for (unsigned i = 0; i != NumOps; ++i) {
SDValue Arg = Op.getOperand(5+2*i);
ISD::ArgFlagsTy Flags =
cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags();
SDValue Arg = TheCall->getArg(i);
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
MVT ArgVT = Arg.getValueType();
unsigned ObjSize;
@ -631,7 +630,8 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
case MVT::i32:
Chain = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag).getValue(1);
ResultVals.push_back(Chain.getValue(0));
if (Op.getNode()->getValueType(1) == MVT::i32) {
if (TheCall->getNumRetVals() > 1 &&
TheCall->getRetValType(1) == MVT::i32) {
// Returns a i64 value.
Chain = DAG.getCopyFromReg(Chain, ARM::R1, MVT::i32,
Chain.getValue(2)).getValue(1);


@ -1101,13 +1101,14 @@ static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
static
SDValue
LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
SDValue Chain = Op.getOperand(0);
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
SDValue Chain = TheCall->getChain();
#if 0
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue() != 0;
bool isVarArg = TheCall->isVarArg();
bool isTailCall = TheCall->isTailCall();
#endif
SDValue Callee = Op.getOperand(4);
unsigned NumOps = (Op.getNumOperands() - 5) / 2;
SDValue Callee = TheCall->getCallee();
unsigned NumOps = TheCall->getNumArgs();
unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();
@ -1136,7 +1137,7 @@ LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
SmallVector<SDValue, 8> MemOpChains;
for (unsigned i = 0; i != NumOps; ++i) {
SDValue Arg = Op.getOperand(5+2*i);
SDValue Arg = TheCall->getArg(i);
// PtrOff will be used to store the current argument to the stack if a
// register cannot be found for it.
@ -1256,18 +1257,18 @@ LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
DAG.getConstant(NumStackBytes, PtrVT),
DAG.getConstant(0, PtrVT),
InFlag);
if (Op.getNode()->getValueType(0) != MVT::Other)
if (TheCall->getValueType(0) != MVT::Other)
InFlag = Chain.getValue(1);
SDValue ResultVals[3];
unsigned NumResults = 0;
// If the call has results, copy the values out of the ret val registers.
switch (Op.getNode()->getValueType(0).getSimpleVT()) {
switch (TheCall->getValueType(0).getSimpleVT()) {
default: assert(0 && "Unexpected ret value!");
case MVT::Other: break;
case MVT::i32:
if (Op.getNode()->getValueType(1) == MVT::i32) {
if (TheCall->getValueType(1) == MVT::i32) {
Chain = DAG.getCopyFromReg(Chain, SPU::R4, MVT::i32, InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0);
Chain = DAG.getCopyFromReg(Chain, SPU::R3, MVT::i32,
@ -1287,7 +1288,7 @@ LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
break;
case MVT::f32:
case MVT::f64:
Chain = DAG.getCopyFromReg(Chain, SPU::R3, Op.getNode()->getValueType(0),
Chain = DAG.getCopyFromReg(Chain, SPU::R3, TheCall->getValueType(0),
InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0);
NumResults = 1;
@ -1297,7 +1298,7 @@ LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
case MVT::v4i32:
case MVT::v8i16:
case MVT::v16i8:
Chain = DAG.getCopyFromReg(Chain, SPU::R3, Op.getNode()->getValueType(0),
Chain = DAG.getCopyFromReg(Chain, SPU::R3, TheCall->getValueType(0),
InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0);
NumResults = 1;


@ -585,10 +585,11 @@ LowerCALL(SDValue Op, SelectionDAG &DAG)
{
MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = Op.getOperand(0);
SDValue Callee = Op.getOperand(4);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
SDValue Chain = TheCall->getChain();
SDValue Callee = TheCall->getCallee();
bool isVarArg = TheCall->isVarArg();
unsigned CC = TheCall->getCallingConv();
MachineFrameInfo *MFI = MF.getFrameInfo();
@ -603,7 +604,7 @@ LowerCALL(SDValue Op, SelectionDAG &DAG)
MFI->CreateFixedObject(VTsize, (VTsize*3));
}
CCInfo.AnalyzeCallOperands(Op.getNode(), CC_Mips);
CCInfo.AnalyzeCallOperands(TheCall, CC_Mips);
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
@ -624,7 +625,7 @@ LowerCALL(SDValue Op, SelectionDAG &DAG)
CCValAssign &VA = ArgLocs[i];
// Arguments start after the 5 first operands of ISD::CALL
SDValue Arg = Op.getOperand(5+2*VA.getValNo());
SDValue Arg = TheCall->getArg(i);
// Promote the value if needed.
switch (VA.getLocInfo()) {
@ -751,7 +752,7 @@ LowerCALL(SDValue Op, SelectionDAG &DAG)
// Handle result values, copying them out of physregs into vregs that we
// return.
return SDValue(LowerCallResult(Chain, InFlag, Op.getNode(), CC, DAG), Op.getResNo());
return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG), Op.getResNo());
}
/// LowerCallResult - Lower the result values of an ISD::CALL into the
@ -760,11 +761,10 @@ LowerCALL(SDValue Op, SelectionDAG &DAG)
/// being lowered. Returns a SDNode with the same number of values as the
/// ISD::CALL.
SDNode *MipsTargetLowering::
LowerCallResult(SDValue Chain, SDValue InFlag, SDNode *TheCall,
LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
unsigned CallingConv, SelectionDAG &DAG) {
bool isVarArg =
cast<ConstantSDNode>(TheCall->getOperand(2))->getZExtValue() != 0;
bool isVarArg = TheCall->isVarArg();
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;


@ -86,7 +86,7 @@ namespace llvm {
const MipsSubtarget *Subtarget;
// Lower Operand helpers
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, SDNode*TheCall,
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
unsigned CallingConv, SelectionDAG &DAG);
bool IsGlobalInSmallSection(GlobalValue *GV);
bool IsInSmallSection(unsigned Size);


@ -1328,10 +1328,9 @@ static const unsigned *GetFPR(const PPCSubtarget &Subtarget) {
/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(SDValue Arg, SDValue Flag,
static unsigned CalculateStackSlotSize(SDValue Arg, ISD::ArgFlagsTy Flags,
bool isVarArg, unsigned PtrByteSize) {
MVT ArgVT = Arg.getValueType();
ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Flag)->getArgFlags();
unsigned ArgSize =ArgVT.getSizeInBits()/8;
if (Flags.isByVal())
ArgSize = Flags.getByValSize();
@ -1475,14 +1474,14 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
if (isVarArg || isPPC64) {
MinReservedArea = ((MinReservedArea+15)/16)*16;
MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
Op.getOperand(ArgNo+3),
Flags,
isVarArg,
PtrByteSize);
} else nAltivecParamsAtEnd++;
} else
// Calculate min reserved area.
MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
Op.getOperand(ArgNo+3),
Flags,
isVarArg,
PtrByteSize);
@ -1794,13 +1793,13 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
bool isMachoABI,
bool isVarArg,
unsigned CC,
SDValue Call,
CallSDNode *TheCall,
unsigned &nAltivecParamsAtEnd) {
// Count how many bytes are to be pushed on the stack, including the linkage
// area, and parameter passing area. We start with 24/48 bytes, which is
// prereserved space for [SP][CR][LR][3 x unused].
unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
unsigned NumOps = (Call.getNumOperands() - 5) / 2;
unsigned NumOps = TheCall->getNumArgs();
unsigned PtrByteSize = isPPC64 ? 8 : 4;
// Add up all the space actually used.
@ -1811,8 +1810,8 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
// 16-byte aligned.
nAltivecParamsAtEnd = 0;
for (unsigned i = 0; i != NumOps; ++i) {
SDValue Arg = Call.getOperand(5+2*i);
SDValue Flag = Call.getOperand(5+2*i+1);
SDValue Arg = TheCall->getArg(i);
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
MVT ArgVT = Arg.getValueType();
// Varargs Altivec parameters are padded to a 16 byte boundary.
if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
@ -1826,7 +1825,7 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
// Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
NumBytes = ((NumBytes+15)/16)*16;
}
NumBytes += CalculateStackSlotSize(Arg, Flag, isVarArg, PtrByteSize);
NumBytes += CalculateStackSlotSize(Arg, Flags, isVarArg, PtrByteSize);
}
// Allow for Altivec parameters at the end, if needed.
@ -1876,27 +1875,25 @@ static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool IsTailCall,
/// calling conventions match, currently only fastcc supports tail calls, and
the function CALL is immediately followed by a RET.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Call,
PPCTargetLowering::IsEligibleForTailCallOptimization(CallSDNode *TheCall,
SDValue Ret,
SelectionDAG& DAG) const {
// Variable argument functions are not supported.
if (!PerformTailCallOpt ||
cast<ConstantSDNode>(Call.getOperand(2))->getZExtValue() != 0)
if (!PerformTailCallOpt || TheCall->isVarArg())
return false;
if (CheckTailCallReturnConstraints(Call, Ret)) {
if (CheckTailCallReturnConstraints(TheCall, Ret)) {
MachineFunction &MF = DAG.getMachineFunction();
unsigned CallerCC = MF.getFunction()->getCallingConv();
unsigned CalleeCC= cast<ConstantSDNode>(Call.getOperand(1))->getZExtValue();
unsigned CalleeCC = TheCall->getCallingConv();
if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
// Functions containing by val parameters are not supported.
for (unsigned i = 0; i != ((Call.getNumOperands()-5)/2); i++) {
ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Call.getOperand(5+2*i+1))
->getArgFlags();
for (unsigned i = 0; i != TheCall->getNumArgs(); i++) {
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
if (Flags.isByVal()) return false;
}
SDValue Callee = Call.getOperand(4);
SDValue Callee = TheCall->getCallee();
// Non PIC/GOT tail calls are supported.
if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return true;
@ -2070,13 +2067,14 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget,
TargetMachine &TM) {
SDValue Chain = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue() != 0
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
SDValue Chain = TheCall->getChain();
bool isVarArg = TheCall->isVarArg();
unsigned CC = TheCall->getCallingConv();
bool isTailCall = TheCall->isTailCall()
&& CC == CallingConv::Fast && PerformTailCallOpt;
SDValue Callee = Op.getOperand(4);
unsigned NumOps = (Op.getNumOperands() - 5) / 2;
SDValue Callee = TheCall->getCallee();
unsigned NumOps = TheCall->getNumArgs();
bool isMachoABI = Subtarget.isMachoABI();
bool isELF32_ABI = Subtarget.isELF32_ABI();
@ -2106,7 +2104,7 @@ SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
// prereserved space for [SP][CR][LR][3 x unused].
unsigned NumBytes =
CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isMachoABI, isVarArg, CC,
Op, nAltivecParamsAtEnd);
TheCall, nAltivecParamsAtEnd);
// Calculate by how many bytes the stack has to be adjusted in case of tail
// call optimization.
@ -2165,9 +2163,8 @@ SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
SmallVector<SDValue, 8> MemOpChains;
for (unsigned i = 0; i != NumOps; ++i) {
bool inMem = false;
SDValue Arg = Op.getOperand(5+2*i);
ISD::ArgFlagsTy Flags =
cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags();
SDValue Arg = TheCall->getArg(i);
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
// See if next argument requires stack alignment in ELF
bool Align = Flags.isSplit();
@ -2391,7 +2388,7 @@ SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
ArgOffset = ((ArgOffset+15)/16)*16;
ArgOffset += 12*16;
for (unsigned i = 0; i != NumOps; ++i) {
SDValue Arg = Op.getOperand(5+2*i);
SDValue Arg = TheCall->getArg(i);
MVT ArgType = Arg.getValueType();
if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
@ -2530,7 +2527,7 @@ SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
assert(InFlag.getNode() &&
"Flag must be set. Depend on flag being set in LowerRET");
Chain = DAG.getNode(PPCISD::TAILCALL,
Op.getNode()->getVTList(), &Ops[0], Ops.size());
TheCall->getVTList(), &Ops[0], Ops.size());
return SDValue(Chain.getNode(), Op.getResNo());
}
@ -2541,14 +2538,14 @@ SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
DAG.getConstant(NumBytes, PtrVT),
DAG.getConstant(BytesCalleePops, PtrVT),
InFlag);
if (Op.getNode()->getValueType(0) != MVT::Other)
if (TheCall->getValueType(0) != MVT::Other)
InFlag = Chain.getValue(1);
SmallVector<SDValue, 16> ResultVals;
SmallVector<CCValAssign, 16> RVLocs;
unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv();
CCState CCInfo(CallerCC, isVarArg, TM, RVLocs);
CCInfo.AnalyzeCallResult(Op.getNode(), RetCC_PPC);
CCInfo.AnalyzeCallResult(TheCall, RetCC_PPC);
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
@ -2566,7 +2563,7 @@ SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
// Otherwise, merge everything together with a MERGE_VALUES node.
ResultVals.push_back(Chain);
SDValue Res = DAG.getMergeValues(Op.getNode()->getVTList(), &ResultVals[0],
SDValue Res = DAG.getMergeValues(TheCall->getVTList(), &ResultVals[0],
ResultVals.size());
return Res.getValue(Op.getResNo());
}


@ -322,7 +322,7 @@ namespace llvm {
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
virtual bool IsEligibleForTailCallOptimization(SDValue Call,
virtual bool IsEligibleForTailCallOptimization(CallSDNode *TheCall,
SDValue Ret,
SelectionDAG &DAG) const;


@ -224,10 +224,11 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
}
static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
SDValue Chain = Op.getOperand(0);
SDValue Callee = Op.getOperand(4);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
unsigned CallingConv = TheCall->getCallingConv();
SDValue Chain = TheCall->getChain();
SDValue Callee = TheCall->getCallee();
bool isVarArg = TheCall->isVarArg();
#if 0
// Analyze operands of the call, assigning locations to each operand.
@ -243,8 +244,8 @@ static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Count the size of the outgoing arguments.
unsigned ArgsSize = 0;
for (unsigned i = 5, e = Op.getNumOperands(); i != e; i += 2) {
switch (Op.getOperand(i).getValueType().getSimpleVT()) {
for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; ++i) {
switch (TheCall->getArg(i).getValueType().getSimpleVT()) {
default: assert(0 && "Unknown value type!");
case MVT::i1:
case MVT::i8:
@ -279,7 +280,7 @@ static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
CCValAssign &VA = ArgLocs[i];
// Arguments start after the 5 first operands of ISD::CALL
SDValue Arg = Op.getOperand(5+2*VA.getValNo());
SDValue Arg = TheCall->getArg(i);
// Promote the value if needed.
switch (VA.getLocInfo()) {
@ -319,8 +320,8 @@ static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
};
unsigned ArgOffset = 68;
for (unsigned i = 5, e = Op.getNumOperands(); i != e; i += 2) {
SDValue Val = Op.getOperand(i);
for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; ++i) {
SDValue Val = TheCall->getArg(i);
MVT ObjectVT = Val.getValueType();
SDValue ValToStore(0, 0);
unsigned ObjSize;
@ -428,7 +429,7 @@ static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
SmallVector<CCValAssign, 16> RVLocs;
CCState RVInfo(CallingConv, isVarArg, DAG.getTarget(), RVLocs);
RVInfo.AnalyzeCallResult(Op.getNode(), RetCC_Sparc32);
RVInfo.AnalyzeCallResult(TheCall, RetCC_Sparc32);
SmallVector<SDValue, 8> ResultVals;
// Copy all of the result registers out of their specified physreg.
@ -448,7 +449,7 @@ static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
ResultVals.push_back(Chain);
// Merge everything together with a MERGE_VALUES node.
return DAG.getMergeValues(Op.getNode()->getVTList(), &ResultVals[0],
return DAG.getMergeValues(TheCall->getVTList(), &ResultVals[0],
ResultVals.size());
}


@ -977,13 +977,12 @@ SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
/// being lowered. It returns an SDNode with the same number of values as the
/// ISD::CALL.
SDNode *X86TargetLowering::
LowerCallResult(SDValue Chain, SDValue InFlag, SDNode *TheCall,
LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
unsigned CallingConv, SelectionDAG &DAG) {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
bool isVarArg =
cast<ConstantSDNode>(TheCall->getOperand(2))->getZExtValue() != 0;
bool isVarArg = TheCall->isVarArg();
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);
@ -1048,12 +1047,12 @@ static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
/// CallIsStructReturn - Determines whether a CALL node uses struct return
/// semantics.
static bool CallIsStructReturn(SDValue Op) {
unsigned NumOps = (Op.getNumOperands() - 5) / 2;
static bool CallIsStructReturn(CallSDNode *TheCall) {
unsigned NumOps = TheCall->getNumArgs();
if (!NumOps)
return false;
return cast<ARG_FLAGSSDNode>(Op.getOperand(6))->getArgFlags().isSRet();
return TheCall->getArgFlags(0).isSRet();
}
/// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses struct
@ -1069,12 +1068,11 @@ static bool ArgsAreStructReturn(SDValue Op) {
/// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires
/// the callee to pop its own arguments. Callee pop is necessary to support tail
/// calls.
bool X86TargetLowering::IsCalleePop(SDValue Op) {
bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
bool X86TargetLowering::IsCalleePop(bool IsVarArg, unsigned CallingConv) {
if (IsVarArg)
return false;
switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
switch (CallingConv) {
default:
return false;
case CallingConv::X86_StdCall:
@ -1086,11 +1084,9 @@ bool X86TargetLowering::IsCalleePop(SDValue Op) {
}
}
/// CCAssignFnForNode - Selects the correct CCAssignFn for a CALL or
/// FORMAL_ARGUMENTS node.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDValue Op) const {
unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
/// CCAssignFnForNode - Selects the correct CCAssignFn for the
/// given CallingConvention value.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(unsigned CC) const {
if (Subtarget->is64Bit()) {
if (Subtarget->isTargetWin64())
return CC_X86_Win64_C;
@ -1203,7 +1199,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
CCInfo.AnalyzeFormalArguments(Op.getNode(), CCAssignFnForNode(Op));
CCInfo.AnalyzeFormalArguments(Op.getNode(), CCAssignFnForNode(CC));
SmallVector<SDValue, 8> ArgValues;
unsigned LastVal = ~0U;
@ -1388,7 +1384,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
ArgValues.push_back(Root);
// Some CCs need callee pop.
if (IsCalleePop(Op)) {
if (IsCalleePop(isVarArg, CC)) {
BytesToPopOnReturn = StackSize; // Callee pops everything.
BytesCallerReserves = 0;
} else {
@ -1413,16 +1409,14 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
}
SDValue
X86TargetLowering::LowerMemOpCallTo(SDValue Op, SelectionDAG &DAG,
X86TargetLowering::LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
const SDValue &StackPtr,
const CCValAssign &VA,
SDValue Chain,
SDValue Arg) {
SDValue Arg, ISD::ArgFlagsTy Flags) {
unsigned LocMemOffset = VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
ISD::ArgFlagsTy Flags =
cast<ARG_FLAGSSDNode>(Op.getOperand(6+2*VA.getValNo()))->getArgFlags();
if (Flags.isByVal()) {
return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
}
@ -1470,14 +1464,15 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = Op.getOperand(0);
unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
bool IsTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue() != 0
&& CC == CallingConv::Fast && PerformTailCallOpt;
SDValue Callee = Op.getOperand(4);
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
SDValue Chain = TheCall->getChain();
unsigned CC = TheCall->getCallingConv();
bool isVarArg = TheCall->isVarArg();
bool IsTailCall = TheCall->isTailCall() &&
CC == CallingConv::Fast && PerformTailCallOpt;
SDValue Callee = TheCall->getCallee();
bool Is64Bit = Subtarget->is64Bit();
bool IsStructRet = CallIsStructReturn(Op);
bool IsStructRet = CallIsStructReturn(TheCall);
assert(!(isVarArg && CC == CallingConv::Fast) &&
"Var args not supported with calling convention fastcc");
@ -1485,7 +1480,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
CCInfo.AnalyzeCallOperands(Op.getNode(), CCAssignFnForNode(Op));
CCInfo.AnalyzeCallOperands(TheCall, CCAssignFnForNode(CC));
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
@ -1520,9 +1515,9 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// of tail call optimization arguments are handle later.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDValue Arg = Op.getOperand(5+2*VA.getValNo());
bool isByVal = cast<ARG_FLAGSSDNode>(Op.getOperand(6+2*VA.getValNo()))->
getArgFlags().isByVal();
SDValue Arg = TheCall->getArg(i);
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
bool isByVal = Flags.isByVal();
// Promote the value if needed.
switch (VA.getLocInfo()) {
@ -1571,8 +1566,8 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
if (StackPtr.getNode() == 0)
StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
Arg));
MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA,
Chain, Arg, Flags));
}
}
}
@ -1651,10 +1646,8 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
CCValAssign &VA = ArgLocs[i];
if (!VA.isRegLoc()) {
assert(VA.isMemLoc());
SDValue Arg = Op.getOperand(5+2*VA.getValNo());
SDValue FlagsOp = Op.getOperand(6+2*VA.getValNo());
ISD::ArgFlagsTy Flags =
cast<ARG_FLAGSSDNode>(FlagsOp)->getArgFlags();
SDValue Arg = TheCall->getArg(i);
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
// Create frame index.
int32_t Offset = VA.getLocMemOffset()+FPDiff;
uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
@ -1764,7 +1757,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
assert(InFlag.getNode() &&
"Flag must be set. Depend on flag being set in LowerRET");
Chain = DAG.getNode(X86ISD::TAILCALL,
Op.getNode()->getVTList(), &Ops[0], Ops.size());
TheCall->getVTList(), &Ops[0], Ops.size());
return SDValue(Chain.getNode(), Op.getResNo());
}
@ -1774,7 +1767,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Create the CALLSEQ_END node.
unsigned NumBytesForCalleeToPush;
if (IsCalleePop(Op))
if (IsCalleePop(isVarArg, CC))
NumBytesForCalleeToPush = NumBytes; // Callee pops everything
else if (!Is64Bit && CC != CallingConv::Fast && IsStructRet)
// If this is a call to a struct-return function, the callee
@ -1793,7 +1786,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Handle result values, copying them out of physregs into vregs that we
// return.
return SDValue(LowerCallResult(Chain, InFlag, Op.getNode(), CC, DAG),
return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG),
Op.getResNo());
}
@ -1855,18 +1848,18 @@ unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
/// following the call is a return. A function is eligible if caller/callee
/// calling conventions match, currently only fastcc supports tail calls, and
the function CALL is immediately followed by a RET.
bool X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Call,
bool X86TargetLowering::IsEligibleForTailCallOptimization(CallSDNode *TheCall,
SDValue Ret,
SelectionDAG& DAG) const {
if (!PerformTailCallOpt)
return false;
if (CheckTailCallReturnConstraints(Call, Ret)) {
if (CheckTailCallReturnConstraints(TheCall, Ret)) {
MachineFunction &MF = DAG.getMachineFunction();
unsigned CallerCC = MF.getFunction()->getCallingConv();
unsigned CalleeCC= cast<ConstantSDNode>(Call.getOperand(1))->getZExtValue();
unsigned CalleeCC= TheCall->getCallingConv();
if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
SDValue Callee = Call.getOperand(4);
SDValue Callee = TheCall->getCallee();
// On x86/32Bit PIC/GOT tail calls are supported.
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
!Subtarget->isPICStyleGOT()|| !Subtarget->is64Bit())


@ -453,7 +453,7 @@ namespace llvm {
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
virtual bool IsEligibleForTailCallOptimization(SDValue Call,
virtual bool IsEligibleForTailCallOptimization(CallSDNode *TheCall,
SDValue Ret,
SelectionDAG &DAG) const;
@ -493,27 +493,27 @@ namespace llvm {
bool X86ScalarSSEf32;
bool X86ScalarSSEf64;
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, SDNode*TheCall,
SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
unsigned CallingConv, SelectionDAG &DAG);
SDValue LowerMemArgument(SDValue Op, SelectionDAG &DAG,
const CCValAssign &VA, MachineFrameInfo *MFI,
unsigned CC, SDValue Root, unsigned i);
SDValue LowerMemOpCallTo(SDValue Op, SelectionDAG &DAG,
SDValue LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
const SDValue &StackPtr,
const CCValAssign &VA, SDValue Chain,
SDValue Arg);
SDValue Arg, ISD::ArgFlagsTy Flags);
// Call lowering helpers.
bool IsCalleePop(SDValue Op);
bool IsCalleePop(bool isVarArg, unsigned CallingConv);
bool CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall);
bool CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall);
SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
SDValue Chain, bool IsTailCall, bool Is64Bit,
int FPDiff);
CCAssignFn *CCAssignFnForNode(SDValue Op) const;
CCAssignFn *CCAssignFnForNode(unsigned CallingConv) const;
NameDecorationStyle NameDecorationForFORMAL_ARGUMENTS(SDValue Op);
unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG);