mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-12-14 11:32:34 +00:00
Early implementation of tail call for ARM.
A temporary flag -arm-tail-calls defaults to off, so there is no functional change by default. Intrepid users may try this; simple cases work but there are bugs. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@105413 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
400f75cb5e
commit
51e28e6348
@ -1571,6 +1571,7 @@ emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
|
||||
MachineBasicBlock::iterator MBBI = prior(MBB.end());
|
||||
assert(MBBI->getDesc().isReturn() &&
|
||||
"Can only insert epilog into returning blocks");
|
||||
unsigned RetOpcode = MBBI->getOpcode();
|
||||
DebugLoc dl = MBBI->getDebugLoc();
|
||||
MachineFrameInfo *MFI = MF.getFrameInfo();
|
||||
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
|
||||
@ -1645,6 +1646,35 @@ emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
|
||||
emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea1Size());
|
||||
}
|
||||
|
||||
if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND ||
|
||||
RetOpcode == ARM::TCRETURNri || RetOpcode == ARM::TCRETURNriND) {
|
||||
// Tail call return: adjust the stack pointer and jump to callee.
|
||||
MBBI = prior(MBB.end());
|
||||
MachineOperand &JumpTarget = MBBI->getOperand(0);
|
||||
|
||||
// Jump to label or value in register.
|
||||
if (RetOpcode == ARM::TCRETURNdi) {
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPd)).
|
||||
addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
|
||||
JumpTarget.getTargetFlags());
|
||||
} else if (RetOpcode == ARM::TCRETURNdiND) {
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPdND)).
|
||||
addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
|
||||
JumpTarget.getTargetFlags());
|
||||
} else if (RetOpcode == ARM::TCRETURNri) {
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr), JumpTarget.getReg());
|
||||
} else if (RetOpcode == ARM::TCRETURNriND) {
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND), JumpTarget.getReg());
|
||||
}
|
||||
|
||||
MachineInstr *NewMI = prior(MBBI);
|
||||
for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
|
||||
NewMI->addOperand(MBBI->getOperand(i));
|
||||
|
||||
// Delete the pseudo instruction TCRETURN.
|
||||
MBB.erase(MBBI);
|
||||
}
|
||||
|
||||
if (VARegSaveSize)
|
||||
emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
|
||||
}
|
||||
|
@ -11,6 +11,7 @@
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#define DEBUG_TYPE "arm-isel"
|
||||
#include "ARM.h"
|
||||
#include "ARMAddressingModes.h"
|
||||
#include "ARMTargetMachine.h"
|
||||
|
@ -12,6 +12,7 @@
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#define DEBUG_TYPE "arm-isel"
|
||||
#include "ARM.h"
|
||||
#include "ARMAddressingModes.h"
|
||||
#include "ARMConstantPoolValue.h"
|
||||
@ -40,6 +41,7 @@
|
||||
#include "llvm/MC/MCSectionMachO.h"
|
||||
#include "llvm/Target/TargetOptions.h"
|
||||
#include "llvm/ADT/VectorExtras.h"
|
||||
#include "llvm/ADT/Statistic.h"
|
||||
#include "llvm/Support/CommandLine.h"
|
||||
#include "llvm/Support/ErrorHandling.h"
|
||||
#include "llvm/Support/MathExtras.h"
|
||||
@ -47,6 +49,14 @@
|
||||
#include <sstream>
|
||||
using namespace llvm;
|
||||
|
||||
STATISTIC(NumTailCalls, "Number of tail calls");
|
||||
|
||||
// This option should go away when tail calls fully work.
|
||||
static cl::opt<bool>
|
||||
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
|
||||
cl::desc("Generate tail calls (TEMPORARY OPTION)."),
|
||||
cl::init(false));
|
||||
|
||||
static cl::opt<bool>
|
||||
EnableARMLongCalls("arm-long-calls", cl::Hidden,
|
||||
cl::desc("Generate calls via indirect call instructions."),
|
||||
@ -535,6 +545,8 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
|
||||
case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
|
||||
case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
|
||||
|
||||
case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";
|
||||
|
||||
case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
|
||||
|
||||
case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";
|
||||
@ -983,8 +995,24 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
||||
const SmallVectorImpl<ISD::InputArg> &Ins,
|
||||
DebugLoc dl, SelectionDAG &DAG,
|
||||
SmallVectorImpl<SDValue> &InVals) const {
|
||||
// ARM target does not yet support tail call optimization.
|
||||
isTailCall = false;
|
||||
MachineFunction &MF = DAG.getMachineFunction();
|
||||
bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
|
||||
bool IsSibCall = false;
|
||||
if (isTailCall) {
|
||||
// Check if it's really possible to do a tail call.
|
||||
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
|
||||
isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
|
||||
Outs, Ins, DAG);
|
||||
// Temporarily disable tail calls so things don't break.
|
||||
if (!EnableARMTailCalls)
|
||||
isTailCall = false;
|
||||
// We don't support GuaranteedTailCallOpt for ARM, only automatically
|
||||
// detected sibcalls.
|
||||
if (isTailCall) {
|
||||
++NumTailCalls;
|
||||
IsSibCall = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Analyze operands of the call, assigning locations to each operand.
|
||||
SmallVector<CCValAssign, 16> ArgLocs;
|
||||
@ -997,9 +1025,14 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
||||
// Get a count of how many bytes are to be pushed on the stack.
|
||||
unsigned NumBytes = CCInfo.getNextStackOffset();
|
||||
|
||||
// For tail calls, memory operands are available in our caller's stack.
|
||||
if (IsSibCall)
|
||||
NumBytes = 0;
|
||||
|
||||
// Adjust the stack pointer for the new arguments...
|
||||
// These operations are automatically eliminated by the prolog/epilog pass
|
||||
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
|
||||
if (!IsSibCall)
|
||||
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
|
||||
|
||||
SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
|
||||
|
||||
@ -1081,13 +1114,31 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
||||
InFlag = Chain.getValue(1);
|
||||
}
|
||||
|
||||
// For tail calls lower the arguments to the 'real' stack slot.
|
||||
if (isTailCall) {
|
||||
// Force all the incoming stack arguments to be loaded from the stack
|
||||
// before any new outgoing arguments are stored to the stack, because the
|
||||
// outgoing stack slots may alias the incoming argument stack slots, and
|
||||
// the alias isn't otherwise explicit. This is slightly more conservative
|
||||
// than necessary, because it means that each store effectively depends
|
||||
// on every argument instead of just those arguments it would clobber.
|
||||
|
||||
// Do not flag preceding copytoreg stuff together with the following stuff.
|
||||
InFlag = SDValue();
|
||||
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
|
||||
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
|
||||
RegsToPass[i].second, InFlag);
|
||||
InFlag = Chain.getValue(1);
|
||||
}
|
||||
InFlag =SDValue();
|
||||
}
|
||||
|
||||
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
|
||||
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
|
||||
// node so that legalize doesn't hack it.
|
||||
bool isDirect = false;
|
||||
bool isARMFunc = false;
|
||||
bool isLocalARMFunc = false;
|
||||
MachineFunction &MF = DAG.getMachineFunction();
|
||||
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
|
||||
|
||||
if (EnableARMLongCalls) {
|
||||
@ -1205,9 +1256,27 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
||||
|
||||
if (InFlag.getNode())
|
||||
Ops.push_back(InFlag);
|
||||
|
||||
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
|
||||
if (isTailCall) {
|
||||
// If this is the first return lowered for this function, add the regs
|
||||
// to the liveout set for the function.
|
||||
if (MF.getRegInfo().liveout_empty()) {
|
||||
SmallVector<CCValAssign, 16> RVLocs;
|
||||
CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
|
||||
*DAG.getContext());
|
||||
CCInfo.AnalyzeCallResult(Ins,
|
||||
CCAssignFnForNode(CallConv, /* Return*/ true,
|
||||
isVarArg));
|
||||
for (unsigned i = 0; i != RVLocs.size(); ++i)
|
||||
if (RVLocs[i].isRegLoc())
|
||||
MF.getRegInfo().addLiveOut(RVLocs[i].getLocReg());
|
||||
}
|
||||
return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
|
||||
}
|
||||
|
||||
// Returns a chain and a flag for retval copy to use.
|
||||
Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
|
||||
&Ops[0], Ops.size());
|
||||
Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
|
||||
InFlag = Chain.getValue(1);
|
||||
|
||||
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
|
||||
@ -1221,6 +1290,161 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
||||
dl, DAG, InVals);
|
||||
}
|
||||
|
||||
/// MatchingStackOffset - Return true if the given stack call argument is
|
||||
/// already available in the same position (relatively) of the caller's
|
||||
/// incoming argument stack.
|
||||
static
|
||||
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
|
||||
MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
|
||||
const ARMInstrInfo *TII) {
|
||||
unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
|
||||
int FI = INT_MAX;
|
||||
if (Arg.getOpcode() == ISD::CopyFromReg) {
|
||||
unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
|
||||
if (!VR || TargetRegisterInfo::isPhysicalRegister(VR))
|
||||
return false;
|
||||
MachineInstr *Def = MRI->getVRegDef(VR);
|
||||
if (!Def)
|
||||
return false;
|
||||
if (!Flags.isByVal()) {
|
||||
if (!TII->isLoadFromStackSlot(Def, FI))
|
||||
return false;
|
||||
} else {
|
||||
// unsigned Opcode = Def->getOpcode();
|
||||
// if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) &&
|
||||
// Def->getOperand(1).isFI()) {
|
||||
// FI = Def->getOperand(1).getIndex();
|
||||
// Bytes = Flags.getByValSize();
|
||||
// } else
|
||||
return false;
|
||||
}
|
||||
} else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
|
||||
if (Flags.isByVal())
|
||||
// ByVal argument is passed in as a pointer but it's now being
|
||||
// dereferenced. e.g.
|
||||
// define @foo(%struct.X* %A) {
|
||||
// tail call @bar(%struct.X* byval %A)
|
||||
// }
|
||||
return false;
|
||||
SDValue Ptr = Ld->getBasePtr();
|
||||
FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
|
||||
if (!FINode)
|
||||
return false;
|
||||
FI = FINode->getIndex();
|
||||
} else
|
||||
return false;
|
||||
|
||||
assert(FI != INT_MAX);
|
||||
if (!MFI->isFixedObjectIndex(FI))
|
||||
return false;
|
||||
return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
|
||||
}
|
||||
|
||||
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
|
||||
/// for tail call optimization. Targets which want to do tail call
|
||||
/// optimization should implement this function.
|
||||
bool
|
||||
ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
|
||||
CallingConv::ID CalleeCC,
|
||||
bool isVarArg,
|
||||
bool isCalleeStructRet,
|
||||
bool isCallerStructRet,
|
||||
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
||||
const SmallVectorImpl<ISD::InputArg> &Ins,
|
||||
SelectionDAG& DAG) const {
|
||||
|
||||
// const MachineFunction &MF = DAG.getMachineFunction();
|
||||
const Function *CallerF = DAG.getMachineFunction().getFunction();
|
||||
CallingConv::ID CallerCC = CallerF->getCallingConv();
|
||||
bool CCMatch = CallerCC == CalleeCC;
|
||||
|
||||
// Look for obvious safe cases to perform tail call optimization that do not
|
||||
// require ABI changes. This is what gcc calls sibcall.
|
||||
|
||||
// Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
|
||||
// emit a special epilogue.
|
||||
// Not sure yet if this is true on ARM.
|
||||
//?? if (RegInfo->needsStackRealignment(MF))
|
||||
//?? return false;
|
||||
|
||||
// Do not sibcall optimize vararg calls unless the call site is not passing any
|
||||
// arguments.
|
||||
if (isVarArg && !Outs.empty())
|
||||
return false;
|
||||
|
||||
// Also avoid sibcall optimization if either caller or callee uses struct
|
||||
// return semantics.
|
||||
if (isCalleeStructRet || isCallerStructRet)
|
||||
return false;
|
||||
|
||||
// If the calling conventions do not match, then we'd better make sure the
|
||||
// results are returned in the same way as what the caller expects.
|
||||
if (!CCMatch) {
|
||||
SmallVector<CCValAssign, 16> RVLocs1;
|
||||
CCState CCInfo1(CalleeCC, false, getTargetMachine(),
|
||||
RVLocs1, *DAG.getContext());
|
||||
CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
|
||||
|
||||
SmallVector<CCValAssign, 16> RVLocs2;
|
||||
CCState CCInfo2(CallerCC, false, getTargetMachine(),
|
||||
RVLocs2, *DAG.getContext());
|
||||
CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
|
||||
|
||||
if (RVLocs1.size() != RVLocs2.size())
|
||||
return false;
|
||||
for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
|
||||
if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
|
||||
return false;
|
||||
if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
|
||||
return false;
|
||||
if (RVLocs1[i].isRegLoc()) {
|
||||
if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
|
||||
return false;
|
||||
} else {
|
||||
if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the callee takes no arguments then go on to check the results of the
|
||||
// call.
|
||||
if (!Outs.empty()) {
|
||||
// Check if stack adjustment is needed. For now, do not do this if any
|
||||
// argument is passed on the stack.
|
||||
SmallVector<CCValAssign, 16> ArgLocs;
|
||||
CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
|
||||
ArgLocs, *DAG.getContext());
|
||||
CCInfo.AnalyzeCallOperands(Outs,
|
||||
CCAssignFnForNode(CalleeCC, false, isVarArg));
|
||||
if (CCInfo.getNextStackOffset()) {
|
||||
MachineFunction &MF = DAG.getMachineFunction();
|
||||
|
||||
// Check if the arguments are already laid out in the right way as
|
||||
// the caller's fixed stack objects.
|
||||
MachineFrameInfo *MFI = MF.getFrameInfo();
|
||||
const MachineRegisterInfo *MRI = &MF.getRegInfo();
|
||||
const ARMInstrInfo *TII =
|
||||
((ARMTargetMachine&)getTargetMachine()).getInstrInfo();
|
||||
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
|
||||
CCValAssign &VA = ArgLocs[i];
|
||||
EVT RegVT = VA.getLocVT();
|
||||
SDValue Arg = Outs[i].Val;
|
||||
ISD::ArgFlagsTy Flags = Outs[i].Flags;
|
||||
if (VA.getLocInfo() == CCValAssign::Indirect)
|
||||
return false;
|
||||
if (!VA.isRegLoc()) {
|
||||
if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
|
||||
MFI, MRI, TII))
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
SDValue
|
||||
ARMTargetLowering::LowerReturn(SDValue Chain,
|
||||
CallingConv::ID CallConv, bool isVarArg,
|
||||
|
@ -70,6 +70,8 @@ namespace llvm {
|
||||
EH_SJLJ_SETJMP, // SjLj exception handling setjmp.
|
||||
EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
|
||||
|
||||
TC_RETURN, // Tail call return pseudo.
|
||||
|
||||
THREAD_POINTER,
|
||||
|
||||
DYN_ALLOC, // Dynamic allocation on the stack.
|
||||
@ -332,6 +334,17 @@ namespace llvm {
|
||||
DebugLoc dl, SelectionDAG &DAG,
|
||||
SmallVectorImpl<SDValue> &InVals) const;
|
||||
|
||||
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
|
||||
/// for tail call optimization. Targets which want to do tail call
|
||||
/// optimization should implement this function.
|
||||
bool IsEligibleForTailCallOptimization(SDValue Callee,
|
||||
CallingConv::ID CalleeCC,
|
||||
bool isVarArg,
|
||||
bool isCalleeStructRet,
|
||||
bool isCallerStructRet,
|
||||
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
||||
const SmallVectorImpl<ISD::InputArg> &Ins,
|
||||
SelectionDAG& DAG) const;
|
||||
virtual SDValue
|
||||
LowerReturn(SDValue Chain,
|
||||
CallingConv::ID CallConv, bool isVarArg,
|
||||
|
@ -53,6 +53,8 @@ def SDT_ARMSYNCBARRIERV7 : SDTypeProfile<0, 0, []>;
|
||||
def SDT_ARMMEMBARRIERV6 : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
|
||||
def SDT_ARMSYNCBARRIERV6 : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
|
||||
|
||||
def SDT_ARMTCRET : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
|
||||
|
||||
// Node definitions.
|
||||
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
|
||||
def ARMWrapperJT : SDNode<"ARMISD::WrapperJT", SDTIntBinOp>;
|
||||
@ -117,6 +119,9 @@ def ARMSyncBarrierV6 : SDNode<"ARMISD::SYNCBARRIER", SDT_ARMMEMBARRIERV6,
|
||||
|
||||
def ARMrbit : SDNode<"ARMISD::RBIT", SDTIntUnaryOp>;
|
||||
|
||||
def ARMtcret : SDNode<"ARMISD::TC_RETURN", SDT_ARMTCRET,
|
||||
[SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// ARM Instruction Predicate Definitions.
|
||||
//
|
||||
@ -1026,6 +1031,80 @@ let isCall = 1,
|
||||
}
|
||||
}
|
||||
|
||||
// Tail calls.
|
||||
|
||||
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
|
||||
// Darwin versions.
|
||||
let Defs = [R0, R1, R2, R3, R9, R12,
|
||||
D0, D1, D2, D3, D4, D5, D6, D7,
|
||||
D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26,
|
||||
D27, D28, D29, D30, D31, PC],
|
||||
Uses = [SP] in {
|
||||
def TCRETURNdi : AInoP<(outs), (ins i32imm:$dst, variable_ops),
|
||||
Pseudo, IIC_Br,
|
||||
"@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
|
||||
|
||||
def TCRETURNri : AInoP<(outs), (ins tGPR:$dst, variable_ops),
|
||||
Pseudo, IIC_Br,
|
||||
"@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
|
||||
|
||||
def TAILJMPd : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
|
||||
IIC_Br, "b\t$dst @ TAILCALL",
|
||||
[]>, Requires<[IsDarwin]>;
|
||||
|
||||
def TAILJMPr : AXI<(outs), (ins tGPR:$dst, variable_ops),
|
||||
BrMiscFrm, IIC_Br, "bx\t$dst @ TAILCALL",
|
||||
[]>, Requires<[IsDarwin]> {
|
||||
let Inst{7-4} = 0b0001;
|
||||
let Inst{19-8} = 0b111111111111;
|
||||
let Inst{27-20} = 0b00010010;
|
||||
let Inst{31-28} = 0b1110;
|
||||
}
|
||||
|
||||
// FIXME: This is a hack so that MCInst lowering can preserve the TAILCALL
|
||||
// marker on instructions, while still being able to relax.
|
||||
// let isCodeGenOnly = 1 in {
|
||||
// def TAILJMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
|
||||
// "jmp\t$dst @ TAILCALL", []>,
|
||||
// Requires<[IsARM, IsDarwin]>;
|
||||
}
|
||||
|
||||
// Non-Darwin versions (the difference is R9).
|
||||
let Defs = [R0, R1, R2, R3, R12,
|
||||
D0, D1, D2, D3, D4, D5, D6, D7,
|
||||
D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26,
|
||||
D27, D28, D29, D30, D31, PC],
|
||||
Uses = [SP] in {
|
||||
def TCRETURNdiND : AInoP<(outs), (ins i32imm:$dst, variable_ops),
|
||||
Pseudo, IIC_Br,
|
||||
"@TC_RETURN","\t$dst", []>, Requires<[IsNotDarwin]>;
|
||||
|
||||
def TCRETURNriND : AInoP<(outs), (ins tGPR:$dst, variable_ops),
|
||||
Pseudo, IIC_Br,
|
||||
"@TC_RETURN","\t$dst", []>, Requires<[IsNotDarwin]>;
|
||||
|
||||
def TAILJMPdND : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
|
||||
IIC_Br, "b\t$dst @ TAILCALL",
|
||||
[]>, Requires<[IsNotDarwin]>;
|
||||
|
||||
def TAILJMPrND : AXI<(outs), (ins tGPR:$dst, variable_ops),
|
||||
BrMiscFrm, IIC_Br, "bx\t$dst @ TAILCALL",
|
||||
[]>, Requires<[IsNotDarwin]> {
|
||||
let Inst{7-4} = 0b0001;
|
||||
let Inst{19-8} = 0b111111111111;
|
||||
let Inst{27-20} = 0b00010010;
|
||||
let Inst{31-28} = 0b1110;
|
||||
}
|
||||
|
||||
// FIXME: This is a hack so that MCInst lowering can preserve the TAILCALL
|
||||
// marker on instructions, while still being able to relax.
|
||||
// let isCodeGenOnly = 1 in {
|
||||
// def TAILJMP_1ND : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
|
||||
// "jmp\t$dst @ TAILCALL", []>,
|
||||
// Requires<[IsARM, IsNotDarwin]>;
|
||||
}
|
||||
}
|
||||
|
||||
let isBranch = 1, isTerminator = 1 in {
|
||||
// B is "predicable" since it can be xformed into a Bcc.
|
||||
let isBarrier = 1 in {
|
||||
@ -2620,6 +2699,24 @@ def : ARMPat<(ARMWrapperJT tjumptable:$dst, imm:$id),
|
||||
|
||||
// TODO: add,sub,and, 3-instr forms?
|
||||
|
||||
// Tail calls
|
||||
def : ARMPat<(ARMtcret tGPR:$dst),
|
||||
(TCRETURNri tGPR:$dst)>, Requires<[IsDarwin]>;
|
||||
|
||||
def : ARMPat<(ARMtcret (i32 tglobaladdr:$dst)),
|
||||
(TCRETURNdi texternalsym:$dst)>, Requires<[IsDarwin]>;
|
||||
|
||||
def : ARMPat<(ARMtcret (i32 texternalsym:$dst)),
|
||||
(TCRETURNdi texternalsym:$dst)>, Requires<[IsDarwin]>;
|
||||
|
||||
def : ARMPat<(ARMtcret tGPR:$dst),
|
||||
(TCRETURNriND tGPR:$dst)>, Requires<[IsNotDarwin]>;
|
||||
|
||||
def : ARMPat<(ARMtcret (i32 tglobaladdr:$dst)),
|
||||
(TCRETURNdiND texternalsym:$dst)>, Requires<[IsNotDarwin]>;
|
||||
|
||||
def : ARMPat<(ARMtcret (i32 texternalsym:$dst)),
|
||||
(TCRETURNdiND texternalsym:$dst)>, Requires<[IsNotDarwin]>;
|
||||
|
||||
// Direct calls
|
||||
def : ARMPat<(ARMcall texternalsym:$func), (BL texternalsym:$func)>,
|
||||
|
@ -1575,6 +1575,13 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
|
||||
if (Name == "BXr9" || Name == "BMOVPCRX" || Name == "BMOVPCRXr9")
|
||||
return false;
|
||||
|
||||
// Tail calls are other patterns that generate existing instructions.
|
||||
if (Name == "TCRETURNdi" || Name == "TCRETURNdiND" ||
|
||||
Name == "TCRETURNri" || Name == "TCRETURNriND" ||
|
||||
Name == "TAILJMPd" || Name == "TAILJMPdND" ||
|
||||
Name == "TAILJMPr" || Name == "TAILJMPrND")
|
||||
return false;
|
||||
|
||||
// VLDMQ/VSTMQ can be handled with the more generic VLDMD/VSTMD.
|
||||
if (Name == "VLDMQ" || Name == "VLDMQ_UPD" ||
|
||||
Name == "VSTMQ" || Name == "VSTMQ_UPD")
|
||||
|
Loading…
Reference in New Issue
Block a user