diff --git a/include/llvm/Target/TargetCallingConv.td b/include/llvm/Target/TargetCallingConv.td
index 224c08e176c..da3cbd20886 100644
--- a/include/llvm/Target/TargetCallingConv.td
+++ b/include/llvm/Target/TargetCallingConv.td
@@ -54,6 +54,10 @@ class CCIfInReg<CCAction A> : CCIf<"ArgFlags.isInReg()", A> {}
 /// the specified action.
 class CCIfNest<CCAction A> : CCIf<"ArgFlags.isNest()", A> {}
 
+/// CCIfSplit - If this argument is marked with the 'split' attribute, apply
+/// the specified action.
+class CCIfSplit<CCAction A> : CCIf<"ArgFlags.isSplit()", A> {}
+
 /// CCIfNotVarArg - If the current function is not vararg - apply the action
 class CCIfNotVarArg<CCAction A> : CCIf<"!State.isVarArg()", A> {}
 
diff --git a/lib/Target/PowerPC/PPCCallingConv.td b/lib/Target/PowerPC/PPCCallingConv.td
index 9f916f38d5e..8090e620df3 100644
--- a/lib/Target/PowerPC/PPCCallingConv.td
+++ b/lib/Target/PowerPC/PPCCallingConv.td
@@ -64,3 +64,86 @@ def CC_PPC : CallingConv<[
 */
 
+//===----------------------------------------------------------------------===//
+// PowerPC System V Release 4 ABI
+//===----------------------------------------------------------------------===//
+
+// _Complex arguments are never split; their two scalars are either both
+// passed in argument registers or both on the stack. _Complex arguments are
+// also always passed in general purpose registers, never in floating-point
+// or vector registers. Arguments which should go on the stack are marked
+// with the inreg parameter attribute.
+// Giving inreg this target-dependent (and counter-intuitive) meaning
+// simplifies things, because function calls do not always come from the
+// frontend; some are created implicitly, e.g. for libcalls. If inreg
+// actually meant that the argument is passed in a register, then every place
+// which implicitly creates function calls or function definitions would need
+// to be aware of this fact and mark arguments accordingly. With inreg
+// meaning that the argument is passed on the stack, this is not an issue,
+// except for calls which involve _Complex types.
+
+def CC_PPC_SVR4_Common : CallingConv<[
+  // The ABI requires i64 to be passed in two adjacent registers with the first
+  // register having an odd register number.
+  CCIfType<[i32], CCIfSplit<CCCustom<"CC_PPC_SVR4_Custom_AlignArgRegs">>>,
+
+  // The first 8 integer arguments are passed in integer registers.
+  CCIfType<[i32], CCIf<"!ArgFlags.isInReg()",
+                       CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,
+
+  // Make sure the i64 words from a long double are either both passed in
+  // registers or both passed on the stack.
+  CCIfType<[f64], CCIfSplit<CCCustom<"CC_PPC_SVR4_Custom_AlignFPArgRegs">>>,
+
+  // FP values are passed in F1 - F8.
+  CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
+
+  // Split arguments have an alignment of 8 bytes on the stack.
+  CCIfType<[i32], CCIfSplit<CCAssignToStack<4, 8>>>,
+
+  CCIfType<[i32], CCAssignToStack<4, 4>>,
+
+  // Floats are stored in double precision format, thus they have the same
+  // alignment and size as doubles.
+  CCIfType<[f32,f64], CCAssignToStack<8, 8>>,
+
+  // Vectors get 16-byte stack slots that are 16-byte aligned.
+  CCIfType<[v16i8, v8i16, v4i32, v4f32], CCAssignToStack<16, 16>>
+]>;
+
+// This calling convention always puts vector arguments on the stack. It is
+// used to assign vector arguments which belong to the variable portion of
+// the parameter list of a variable argument function.
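+// As a hypothetical example: for a variadic callee such as
+//   void f(int n, ...);
+// a v4f32 passed in the variable portion is assigned a 16-byte stack slot by
+// the CCAssignToStack rule in CC_PPC_SVR4_Common above rather than an
+// AltiVec register.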
+def CC_PPC_SVR4_VarArg : CallingConv<[
+  CCDelegateTo<CC_PPC_SVR4_Common>
+]>;
+
+// In contrast to CC_PPC_SVR4_VarArg, this calling convention first tries to
+// put vector arguments in vector registers before putting them on the stack.
+def CC_PPC_SVR4 : CallingConv<[
+  // The first 12 Vector arguments are passed in AltiVec registers.
+  CCIfType<[v16i8, v8i16, v4i32, v4f32],
+           CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13]>>,
+
+  CCDelegateTo<CC_PPC_SVR4_Common>
+]>;
+
+// Helper "calling convention" to handle aggregate by value arguments.
+// Aggregate by value arguments are always placed in the local variable space
+// of the caller. This calling convention is only used to assign those stack
+// offsets in the caller's stack frame.
+//
+// Still, the address of the aggregate copy in the caller's stack frame is
+// passed in a GPR (or in the parameter list area if all GPRs are allocated)
+// from the caller to the callee. The location for the address argument is
+// assigned by the CC_PPC_SVR4 calling convention.
+//
+// The only purpose of CC_PPC_SVR4_Custom_Dummy is to skip arguments which are
+// not passed by value.
+
+def CC_PPC_SVR4_ByVal : CallingConv<[
+  CCIfByVal<CCPassByVal<4, 4>>,
+
+  CCCustom<"CC_PPC_SVR4_Custom_Dummy">
+]>;
+
diff --git a/lib/Target/PowerPC/PPCFrameInfo.h b/lib/Target/PowerPC/PPCFrameInfo.h
index 1b5893da0ce..2476a33d151 100644
--- a/lib/Target/PowerPC/PPCFrameInfo.h
+++ b/lib/Target/PowerPC/PPCFrameInfo.h
@@ -14,8 +14,10 @@
 #define POWERPC_FRAMEINFO_H
 
 #include "PPC.h"
+#include "PPCSubtarget.h"
 #include "llvm/Target/TargetFrameInfo.h"
 #include "llvm/Target/TargetMachine.h"
+#include "llvm/ADT/STLExtras.h"
 
 namespace llvm {
 
@@ -85,7 +87,96 @@ public:
     return getLinkageSize(LP64, isMacho) +
            getMinCallArgumentsSize(LP64, isMacho);
   }
-  
+
+  // With the SVR4 ABI, callee-saved registers have fixed offsets on the stack.
+  const std::pair<unsigned, int> *
+  getCalleeSavedSpillSlots(unsigned &NumEntries) const {
+    // Early exit if not using the SVR4 ABI.
+    if (!TM.getSubtarget<PPCSubtarget>().isELF32_ABI()) {
+      NumEntries = 0;
+      return 0;
+    }
+
+    static const std::pair<unsigned, int> Offsets[] = {
+      // Floating-point register save area offsets.
+      std::pair<unsigned, int>(PPC::F31, -8),
+      std::pair<unsigned, int>(PPC::F30, -16),
+      std::pair<unsigned, int>(PPC::F29, -24),
+      std::pair<unsigned, int>(PPC::F28, -32),
+      std::pair<unsigned, int>(PPC::F27, -40),
+      std::pair<unsigned, int>(PPC::F26, -48),
+      std::pair<unsigned, int>(PPC::F25, -56),
+      std::pair<unsigned, int>(PPC::F24, -64),
+      std::pair<unsigned, int>(PPC::F23, -72),
+      std::pair<unsigned, int>(PPC::F22, -80),
+      std::pair<unsigned, int>(PPC::F21, -88),
+      std::pair<unsigned, int>(PPC::F20, -96),
+      std::pair<unsigned, int>(PPC::F19, -104),
+      std::pair<unsigned, int>(PPC::F18, -112),
+      std::pair<unsigned, int>(PPC::F17, -120),
+      std::pair<unsigned, int>(PPC::F16, -128),
+      std::pair<unsigned, int>(PPC::F15, -136),
+      std::pair<unsigned, int>(PPC::F14, -144),
+
+      // General register save area offsets.
+      std::pair<unsigned, int>(PPC::R31, -4),
+      std::pair<unsigned, int>(PPC::R30, -8),
+      std::pair<unsigned, int>(PPC::R29, -12),
+      std::pair<unsigned, int>(PPC::R28, -16),
+      std::pair<unsigned, int>(PPC::R27, -20),
+      std::pair<unsigned, int>(PPC::R26, -24),
+      std::pair<unsigned, int>(PPC::R25, -28),
+      std::pair<unsigned, int>(PPC::R24, -32),
+      std::pair<unsigned, int>(PPC::R23, -36),
+      std::pair<unsigned, int>(PPC::R22, -40),
+      std::pair<unsigned, int>(PPC::R21, -44),
+      std::pair<unsigned, int>(PPC::R20, -48),
+      std::pair<unsigned, int>(PPC::R19, -52),
+      std::pair<unsigned, int>(PPC::R18, -56),
+      std::pair<unsigned, int>(PPC::R17, -60),
+      std::pair<unsigned, int>(PPC::R16, -64),
+      std::pair<unsigned, int>(PPC::R15, -68),
+      std::pair<unsigned, int>(PPC::R14, -72),
+
+      // CR save area offset.
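+      // CR2 - CR4 and their CRBIT subfields below all live in the single
+      // 4-byte CR save word, which is why every entry in this group shares
+      // the same -4 offset.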
+      std::pair<unsigned, int>(PPC::CR2, -4),
+      std::pair<unsigned, int>(PPC::CR3, -4),
+      std::pair<unsigned, int>(PPC::CR4, -4),
+      std::pair<unsigned, int>(PPC::CR2LT, -4),
+      std::pair<unsigned, int>(PPC::CR2GT, -4),
+      std::pair<unsigned, int>(PPC::CR2EQ, -4),
+      std::pair<unsigned, int>(PPC::CR2UN, -4),
+      std::pair<unsigned, int>(PPC::CR3LT, -4),
+      std::pair<unsigned, int>(PPC::CR3GT, -4),
+      std::pair<unsigned, int>(PPC::CR3EQ, -4),
+      std::pair<unsigned, int>(PPC::CR3UN, -4),
+      std::pair<unsigned, int>(PPC::CR4LT, -4),
+      std::pair<unsigned, int>(PPC::CR4GT, -4),
+      std::pair<unsigned, int>(PPC::CR4EQ, -4),
+      std::pair<unsigned, int>(PPC::CR4UN, -4),
+
+      // VRSAVE save area offset.
+      std::pair<unsigned, int>(PPC::VRSAVE, -4),
+
+      // Vector register save area.
+      std::pair<unsigned, int>(PPC::V31, -16),
+      std::pair<unsigned, int>(PPC::V30, -32),
+      std::pair<unsigned, int>(PPC::V29, -48),
+      std::pair<unsigned, int>(PPC::V28, -64),
+      std::pair<unsigned, int>(PPC::V27, -80),
+      std::pair<unsigned, int>(PPC::V26, -96),
+      std::pair<unsigned, int>(PPC::V25, -112),
+      std::pair<unsigned, int>(PPC::V24, -128),
+      std::pair<unsigned, int>(PPC::V23, -144),
+      std::pair<unsigned, int>(PPC::V22, -160),
+      std::pair<unsigned, int>(PPC::V21, -176),
+      std::pair<unsigned, int>(PPC::V20, -192)
+    };
+
+    NumEntries = array_lengthof(Offsets);
+
+    return Offsets;
+  }
 };
 
 } // End llvm namespace
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 7bb764628ea..08307aaf293 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -35,6 +35,21 @@
 #include "llvm/DerivedTypes.h"
 using namespace llvm;
 
+static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+                                     CCValAssign::LocInfo &LocInfo,
+                                     ISD::ArgFlagsTy &ArgFlags,
+                                     CCState &State);
+static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
+                                            MVT &LocVT,
+                                            CCValAssign::LocInfo &LocInfo,
+                                            ISD::ArgFlagsTy &ArgFlags,
+                                            CCState &State);
+static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
+                                              MVT &LocVT,
+                                              CCValAssign::LocInfo &LocInfo,
+                                              ISD::ArgFlagsTy &ArgFlags,
+                                              CCState &State);
+
 static cl::opt<bool>
 EnablePPCPreinc("enable-ppc-preinc",
   cl::desc("enable preincrement load/store generation on PPC (experimental)"),
   cl::Hidden);
@@ -1319,8 +1334,8 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
   //  } va_list[1];
 
-  SDValue ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8);
-  SDValue ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8);
+  SDValue ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i32);
+  SDValue ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i32);
 
   MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
 
@@ -1340,15 +1355,15 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
 
   // Store first byte : number of int regs
-  SDValue firstStore = DAG.getStore(Op.getOperand(0), dl, ArgGPR,
-                                    Op.getOperand(1), SV, 0);
+  SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
+                                         Op.getOperand(1), SV, 0, MVT::i8);
   uint64_t nextOffset = FPROffset;
   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                 ConstFPROffset);
 
   // Store second byte : number of float regs
   SDValue secondStore =
-    DAG.getStore(firstStore, dl, ArgFPR, nextPtr, SV, nextOffset);
+    DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, SV, nextOffset, MVT::i8);
   nextOffset += StackOffset;
   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
 
@@ -1365,6 +1380,67 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
 
 #include "PPCGenCallingConv.inc"
 
+static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+                                     CCValAssign::LocInfo &LocInfo,
+                                     ISD::ArgFlagsTy &ArgFlags,
+                                     CCState &State) {
+  return true;
+}
+
+static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
+                                            MVT &LocVT,
+                                            CCValAssign::LocInfo &LocInfo,
+                                            ISD::ArgFlagsTy &ArgFlags,
+                                            CCState &State) {
+  static const unsigned ArgRegs[] = {
+    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
+    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
+  };
+  const unsigned NumArgRegs = array_lengthof(ArgRegs);
+
+  unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
+
+  // Skip one register if the first unallocated register has an even register
+  // number and there are still argument registers available which have not
+  // been allocated yet. RegNum is actually an index into ArgRegs, which means
+  // we need to skip a register if RegNum is odd.
+  if (RegNum != NumArgRegs && RegNum % 2 == 1) {
+    State.AllocateReg(ArgRegs[RegNum]);
+  }
+
+  // Always return false here, as this function only makes sure that the first
+  // unallocated register has an odd register number and does not actually
+  // allocate a register for the current argument.
+  return false;
+}
+
+static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
+                                              MVT &LocVT,
+                                              CCValAssign::LocInfo &LocInfo,
+                                              ISD::ArgFlagsTy &ArgFlags,
+                                              CCState &State) {
+  static const unsigned ArgRegs[] = {
+    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
+    PPC::F8
+  };
+
+  const unsigned NumArgRegs = array_lengthof(ArgRegs);
+
+  unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
+
+  // If there is only one floating-point register left we need to put both f64
+  // values of a split ppc_fp128 value on the stack.
+  if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
+    State.AllocateReg(ArgRegs[RegNum]);
+  }
+
+  // Always return false here, as this function only makes sure that the two
+  // f64 values that a ppc_fp128 value is split into are both passed in
+  // registers or both passed on the stack, and does not actually allocate a
+  // register for the current argument.
+  return false;
+}
+
 /// GetFPR - Get the set of FP registers that should be allocated for arguments,
 /// depending on which subtarget is selected.
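+/// With the SVR4 ABI only F1 - F8 carry arguments (matching
+/// CC_PPC_SVR4_Common above); the Darwin ABI passes floating-point arguments
+/// in F1 - F13.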
 static const unsigned *GetFPR(const PPCSubtarget &Subtarget) {
@@ -1397,6 +1473,240 @@ static unsigned CalculateStackSlotSize(SDValue Arg, ISD::ArgFlagsTy Flags,
   return ArgSize;
 }
 
+SDValue
+PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
+                                              SelectionDAG &DAG,
+                                              int &VarArgsFrameIndex,
+                                              int &VarArgsStackOffset,
+                                              unsigned &VarArgsNumGPR,
+                                              unsigned &VarArgsNumFPR,
+                                              const PPCSubtarget &Subtarget) {
+  // SVR4 ABI Stack Frame Layout:
+  //              +-----------------------------------+
+  //        +-->  |            Back chain             |
+  //        |     +-----------------------------------+
+  //        |     | Floating-point register save area |
+  //        |     +-----------------------------------+
+  //        |     |    General register save area     |
+  //        |     +-----------------------------------+
+  //        |     |          CR save word             |
+  //        |     +-----------------------------------+
+  //        |     |         VRSAVE save word          |
+  //        |     +-----------------------------------+
+  //        |     |         Alignment padding         |
+  //        |     +-----------------------------------+
+  //        |     |     Vector register save area     |
+  //        |     +-----------------------------------+
+  //        |     |       Local variable space        |
+  //        |     +-----------------------------------+
+  //        |     |        Parameter list area        |
+  //        |     +-----------------------------------+
+  //        |     |           LR save word            |
+  //        |     +-----------------------------------+
+  // SP-->  +---  |            Back chain             |
+  //              +-----------------------------------+
+  //
+  // Specifications:
+  //   System V Application Binary Interface PowerPC Processor Supplement
+  //   AltiVec Technology Programming Interface Manual
+
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo *MFI = MF.getFrameInfo();
+  SmallVector<SDValue, 8> ArgValues;
+  SDValue Root = Op.getOperand(0);
+  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
+  DebugLoc dl = Op.getDebugLoc();
+
+  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  // Potential tail calls could cause overwriting of argument stack slots.
+  unsigned CC = MF.getFunction()->getCallingConv();
+  bool isImmutable = !(PerformTailCallOpt && (CC==CallingConv::Fast));
+  unsigned PtrByteSize = 4;
+
+  // Assign locations to all of the incoming arguments.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
+
+  // Reserve space for the linkage area on the stack.
+  CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize);
+
+  CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_PPC_SVR4);
+
+  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+    CCValAssign &VA = ArgLocs[i];
+
+    // Arguments stored in registers.
+    if (VA.isRegLoc()) {
+      TargetRegisterClass *RC;
+      MVT ValVT = VA.getValVT();
+
+      switch (ValVT.getSimpleVT()) {
+        default:
+          assert(0 && "ValVT not supported by FORMAL_ARGUMENTS Lowering");
+        case MVT::i32:
+          RC = PPC::GPRCRegisterClass;
+          break;
+        case MVT::f32:
+          RC = PPC::F4RCRegisterClass;
+          break;
+        case MVT::f64:
+          RC = PPC::F8RCRegisterClass;
+          break;
+        case MVT::v16i8:
+        case MVT::v8i16:
+        case MVT::v4i32:
+        case MVT::v4f32:
+          RC = PPC::VRRCRegisterClass;
+          break;
+      }
+
+      // Transform the arguments stored in physical registers into virtual ones.
+      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+      SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, ValVT);
+
+      ArgValues.push_back(ArgValue);
+    } else {
+      // Argument stored in memory.
+      assert(VA.isMemLoc());
+
+      unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8;
+      int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
+                                      isImmutable);
+
+      // Create load nodes to retrieve arguments from the stack.
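+      // The fixed object created above lives at VA.getLocMemOffset() relative
+      // to the incoming stack pointer, i.e. in the caller's parameter list
+      // area, so this load reads the argument directly out of the caller's
+      // frame.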
+      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+      ArgValues.push_back(DAG.getLoad(VA.getValVT(), dl, Root, FIN, NULL, 0));
+    }
+  }
+
+  // Assign locations to all of the incoming aggregate by value arguments.
+  // Aggregates passed by value are stored in the local variable space of the
+  // caller's stack frame, right above the parameter list area.
+  SmallVector<CCValAssign, 16> ByValArgLocs;
+  CCState CCByValInfo(CC, isVarArg, getTargetMachine(), ByValArgLocs);
+
+  // Reserve stack space for the allocations in CCInfo.
+  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
+
+  CCByValInfo.AnalyzeFormalArguments(Op.getNode(), CC_PPC_SVR4_ByVal);
+
+  // Area that is at least reserved in the caller of this function.
+  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
+
+  // Set the size that is at least reserved in the caller of this function.
+  // A tail call optimized function's reserved stack space needs to be aligned
+  // so that taking the difference between two stack areas will result in an
+  // aligned stack.
+  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+
+  MinReservedArea =
+    std::max(MinReservedArea,
+             PPCFrameInfo::getMinCallFrameSize(false, false));
+
+  unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
+    getStackAlignment();
+  unsigned AlignMask = TargetAlign-1;
+  MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
+
+  FI->setMinReservedArea(MinReservedArea);
+
+  SmallVector<SDValue, 8> MemOps;
+
+  // If the function takes a variable number of arguments, make a frame index
+  // for the start of the first vararg value... for expansion of llvm.va_start.
+  if (isVarArg) {
+    static const unsigned GPArgRegs[] = {
+      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
+      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
+    };
+    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
+
+    static const unsigned FPArgRegs[] = {
+      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
+      PPC::F8
+    };
+    const unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
+
+    VarArgsNumGPR = CCInfo.getFirstUnallocated(GPArgRegs, NumGPArgRegs);
+    VarArgsNumFPR = CCInfo.getFirstUnallocated(FPArgRegs, NumFPArgRegs);
+
+    // Make room for NumGPArgRegs and NumFPArgRegs.
+    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
+                NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
+
+    VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
+                                                CCInfo.getNextStackOffset());
+
+    VarArgsFrameIndex = MFI->CreateStackObject(Depth, 8);
+    SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
+
+    // The fixed integer arguments of a variadic function are
+    // stored to the VarArgsFrameIndex on the stack.
+    unsigned GPRIndex = 0;
+    for (; GPRIndex != VarArgsNumGPR; ++GPRIndex) {
+      SDValue Val = DAG.getRegister(GPArgRegs[GPRIndex], PtrVT);
+      SDValue Store = DAG.getStore(Root, dl, Val, FIN, NULL, 0);
+      MemOps.push_back(Store);
+      // Increment the address by four for the next argument to store
+      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
+      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
+    }
+
+    // If this function is vararg, store any remaining integer argument regs
+    // to their spots on the stack so that they may be loaded by dereferencing
+    // the result of va_next.
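+    // Only registers not consumed by fixed arguments are spilled here;
+    // GPRIndex resumes at VarArgsNumGPR, so e.g. a hypothetical
+    // f(int, int, ...) would spill R5 through R10 below.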
+ for (; GPRIndex != NumGPArgRegs; ++GPRIndex) { + unsigned VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); + + SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT); + SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); + MemOps.push_back(Store); + // Increment the address by four for the next argument to store + SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); + FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); + } + + // FIXME SVR4: We only need to save FP argument registers if CR bit 6 is + // set. + + // The double arguments are stored to the VarArgsFrameIndex + // on the stack. + unsigned FPRIndex = 0; + for (FPRIndex = 0; FPRIndex != VarArgsNumFPR; ++FPRIndex) { + SDValue Val = DAG.getRegister(FPArgRegs[FPRIndex], MVT::f64); + SDValue Store = DAG.getStore(Root, dl, Val, FIN, NULL, 0); + MemOps.push_back(Store); + // Increment the address by eight for the next argument to store + SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, + PtrVT); + FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); + } + + for (; FPRIndex != NumFPArgRegs; ++FPRIndex) { + unsigned VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); + + SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::f64); + SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0); + MemOps.push_back(Store); + // Increment the address by eight for the next argument to store + SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, + PtrVT); + FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); + } + } + + if (!MemOps.empty()) + Root = DAG.getNode(ISD::TokenFactor, dl, + MVT::Other, &MemOps[0], MemOps.size()); + + + ArgValues.push_back(Root); + + // Return the new list of results. + return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(), + &ArgValues[0], ArgValues.size()).getValue(Op.getResNo()); +} + SDValue PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, @@ -2023,17 +2333,21 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, isMachoABI); int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewRetAddrLoc); - int NewFPLoc = SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64, - isMachoABI); - int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc); - MVT VT = isPPC64 ? MVT::i64 : MVT::i32; SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx, PseudoSourceValue::getFixedStack(NewRetAddr), 0); - SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); - Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, - PseudoSourceValue::getFixedStack(NewFPIdx), 0); + + // When using the SVR4 ABI there is no need to move the FP stack slot + // as the FP is never overwritten. + if (isMachoABI) { + int NewFPLoc = + SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64, isMachoABI); + int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc); + SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); + Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx, + PseudoSourceValue::getFixedStack(NewFPIdx), 0); + } } return Chain; } @@ -2064,6 +2378,7 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, SDValue Chain, SDValue &LROpOut, SDValue &FPOpOut, + bool isMachoABI, DebugLoc dl) { if (SPDiff) { // Load the LR and FP stack slot for later adjusting. 
@@ -2071,9 +2386,14 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
     LROpOut = getReturnAddrFrameIndex(DAG);
     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, NULL, 0);
     Chain = SDValue(LROpOut.getNode(), 1);
-    FPOpOut = getFramePointerFrameIndex(DAG);
-    FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, NULL, 0);
-    Chain = SDValue(FPOpOut.getNode(), 1);
+
+    // When using the SVR4 ABI there is no need to load the FP stack slot
+    // as the FP is never overwritten.
+    if (isMachoABI) {
+      FPOpOut = getFramePointerFrameIndex(DAG);
+      FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, NULL, 0);
+      Chain = SDValue(FPOpOut.getNode(), 1);
+    }
   }
   return Chain;
 }
@@ -2119,6 +2439,330 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
                     TailCallArguments);
 }
 
+SDValue PPCTargetLowering::LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG,
+                                          const PPCSubtarget &Subtarget,
+                                          TargetMachine &TM) {
+  // See PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4() for a description
+  // of the SVR4 ABI stack frame layout.
+  CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
+  SDValue Chain = TheCall->getChain();
+  bool isVarArg = TheCall->isVarArg();
+  unsigned CC = TheCall->getCallingConv();
+  assert((CC == CallingConv::C ||
+          CC == CallingConv::Fast) && "Unknown calling convention!");
+  bool isTailCall = TheCall->isTailCall() &&
+                    CC == CallingConv::Fast && PerformTailCallOpt;
+  SDValue Callee = TheCall->getCallee();
+  DebugLoc dl = TheCall->getDebugLoc();
+
+  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  unsigned PtrByteSize = 4;
+
+  MachineFunction &MF = DAG.getMachineFunction();
+
+  // Mark this function as potentially containing a function that contains a
+  // tail call. As a consequence the frame pointer will be used for dynamic
+  // allocas and for restoring the caller's stack pointer in this function's
+  // epilogue. This is done because a tail-called function might overwrite the
+  // value in this function's (MF) stack pointer stack slot 0(SP).
+  if (PerformTailCallOpt && CC==CallingConv::Fast)
+    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
+
+  // Count how many bytes are to be pushed on the stack, including the linkage
+  // area, parameter list area and the part of the local variable space which
+  // contains copies of aggregates which are passed by value.
+
+  // Assign locations to all of the outgoing arguments.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
+
+  // Reserve space for the linkage area on the stack.
+  CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize);
+
+  if (isVarArg) {
+    // Handle fixed and variable vector arguments differently.
+    // Fixed vector arguments go into registers as long as registers are
+    // available. Variable vector arguments always go into memory.
+    unsigned NumArgs = TheCall->getNumArgs();
+    unsigned NumFixedArgs = TheCall->getNumFixedArgs();
+
+    for (unsigned i = 0; i != NumArgs; ++i) {
+      MVT ArgVT = TheCall->getArg(i).getValueType();
+      ISD::ArgFlagsTy ArgFlags = TheCall->getArgFlags(i);
+      bool Result;
+
+      if (i < NumFixedArgs) {
+        Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
+                             CCInfo);
+      } else {
+        Result = CC_PPC_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
+                                    ArgFlags, CCInfo);
+      }
+
+      if (Result) {
+        cerr << "Call operand #" << i << " has unhandled type "
+             << ArgVT.getMVTString() << "\n";
+        abort();
+      }
+    }
+  } else {
+    // All arguments are treated the same.
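+    // Since the callee's prototype is fully known here, fixed vector
+    // arguments may be assigned to the AltiVec registers V2 - V13 by
+    // CC_PPC_SVR4 before any overflow goes to the stack.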
+    CCInfo.AnalyzeCallOperands(TheCall, CC_PPC_SVR4);
+  }
+
+  // Assign locations to all of the outgoing aggregate by value arguments.
+  SmallVector<CCValAssign, 16> ByValArgLocs;
+  CCState CCByValInfo(CC, isVarArg, getTargetMachine(), ByValArgLocs);
+
+  // Reserve stack space for the allocations in CCInfo.
+  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
+
+  CCByValInfo.AnalyzeCallOperands(TheCall, CC_PPC_SVR4_ByVal);
+
+  // Size of the linkage area, parameter list area and the part of the local
+  // variable space where copies of aggregates which are passed by value are
+  // stored.
+  unsigned NumBytes = CCByValInfo.getNextStackOffset();
+
+  // Calculate by how many bytes the stack has to be adjusted in case of tail
+  // call optimization.
+  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
+
+  // Adjust the stack pointer for the new arguments...
+  // These operations are automatically eliminated by the prolog/epilog pass.
+  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
+  SDValue CallSeqStart = Chain;
+
+  // Load the return address and frame pointer so they can be moved somewhere
+  // else later.
+  SDValue LROp, FPOp;
+  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
+                                       dl);
+
+  // Set up a copy of the stack pointer for use loading and storing any
+  // arguments that may not fit in the registers available for argument
+  // passing.
+  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
+
+  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
+  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
+  SmallVector<SDValue, 8> MemOpChains;
+
+  // Walk the register/memloc assignments, inserting copies/loads.
+  for (unsigned i = 0, j = 0, e = ArgLocs.size();
+       i != e;
+       ++i) {
+    CCValAssign &VA = ArgLocs[i];
+    SDValue Arg = TheCall->getArg(i);
+    ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
+
+    if (Flags.isByVal()) {
+      // Argument is an aggregate which is passed by value, thus we need to
+      // create a copy of it in the local variable space of the current stack
+      // frame (which is the stack frame of the caller) and pass the address of
+      // this copy to the callee.
+      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
+      CCValAssign &ByValVA = ByValArgLocs[j++];
+      assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
+
+      // Memory reserved in the local variable space of the caller's stack
+      // frame.
+      unsigned LocMemOffset = ByValVA.getLocMemOffset();
+
+      SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
+      PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
+
+      // Create a copy of the argument in the local area of the current
+      // stack frame.
+      SDValue MemcpyCall =
+        CreateCopyOfByValArgument(Arg, PtrOff,
+                                  CallSeqStart.getNode()->getOperand(0),
+                                  Flags, DAG, dl);
+
+      // This must go outside the CALLSEQ_START..END.
+      SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
+                           CallSeqStart.getNode()->getOperand(1));
+      DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
+                             NewCallSeqStart.getNode());
+      Chain = CallSeqStart = NewCallSeqStart;
+
+      // Pass the address of the aggregate copy on the stack either in a
+      // physical register or in the parameter list area of the current stack
+      // frame to the callee.
+      Arg = PtrOff;
+    }
+
+    if (VA.isRegLoc()) {
+      // Put argument in a physical register.
+      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+    } else {
+      // Put argument in the parameter list area of the current stack frame.
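+      // On SVR4 the parameter list area begins directly after the 8-byte
+      // linkage area (back chain word plus LR save word) reserved above, so
+      // LocMemOffset is already relative to the outgoing stack pointer.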
+      assert(VA.isMemLoc());
+      unsigned LocMemOffset = VA.getLocMemOffset();
+
+      if (!isTailCall) {
+        SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
+        PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
+
+        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+                              PseudoSourceValue::getStack(), LocMemOffset));
+      } else {
+        // Calculate and remember argument location.
+        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
+                                 TailCallArguments);
+      }
+    }
+  }
+
+  if (!MemOpChains.empty())
+    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+                        &MemOpChains[0], MemOpChains.size());
+
+  // Build a sequence of copy-to-reg nodes chained together with token chain
+  // and flag operands which copy the outgoing args into the appropriate regs.
+  SDValue InFlag;
+  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+                             RegsToPass[i].second, InFlag);
+    InFlag = Chain.getValue(1);
+  }
+
+  // Set CR6 to true if this is a vararg call.
+  if (isVarArg) {
+    SDValue SetCR(DAG.getTargetNode(PPC::CRSET, dl, MVT::i32), 0);
+    Chain = DAG.getCopyToReg(Chain, dl, PPC::CR1EQ, SetCR, InFlag);
+    InFlag = Chain.getValue(1);
+  }
+
+  // Emit a sequence of copyto/copyfrom virtual registers for arguments that
+  // might overwrite each other in case of tail call optimization.
+  if (isTailCall) {
+    SmallVector<SDValue, 8> MemOpChains2;
+    // Do not flag preceding copytoreg stuff together with the following stuff.
+    InFlag = SDValue();
+    StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
+                                      MemOpChains2, dl);
+    if (!MemOpChains2.empty())
+      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+                          &MemOpChains2[0], MemOpChains2.size());
+
+    // Store the return address to the appropriate stack slot.
+    Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
+                                          false, false, dl);
+  }
+
+  // Emit callseq_end just before tailcall node.
+  if (isTailCall) {
+    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
+                               DAG.getIntPtrConstant(0, true), InFlag);
+    InFlag = Chain.getValue(1);
+  }
+
+  std::vector<MVT> NodeTys;
+  NodeTys.push_back(MVT::Other);   // Returns a chain
+  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
+
+  SmallVector<SDValue, 8> Ops;
+  unsigned CallOpc = PPCISD::CALL_ELF;
+
+  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
+  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
+  // node so that legalize doesn't hack it.
+  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType());
+  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
+    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType());
+  else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
+    // If this is an absolute destination address, use the munged value.
+    Callee = SDValue(Dest, 0);
+  else {
+    // Otherwise, this is an indirect call.  We have to use a MTCTR/BCTRL pair
+    // to do the call, we can't use PPCISD::CALL.
+    SDValue MTCTROps[] = {Chain, Callee, InFlag};
+    Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps,
+                        2 + (InFlag.getNode() != 0));
+    InFlag = Chain.getValue(1);
+
+    NodeTys.clear();
+    NodeTys.push_back(MVT::Other);
+    NodeTys.push_back(MVT::Flag);
+    Ops.push_back(Chain);
+    CallOpc = PPCISD::BCTRL_ELF;
+    Callee.setNode(0);
+    // Add CTR register as callee so a bctr can be emitted later.
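+    // The emitted sequence for this indirect call is thus mtctr followed by
+    // bctrl, with CTR listed as an operand so a bctr can replace it for tail
+    // calls.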
+    if (isTailCall)
+      Ops.push_back(DAG.getRegister(PPC::CTR, getPointerTy()));
+  }
+
+  // If this is a direct call, pass the chain and the callee.
+  if (Callee.getNode()) {
+    Ops.push_back(Chain);
+    Ops.push_back(Callee);
+  }
+  // If this is a tail call add stack pointer delta.
+  if (isTailCall)
+    Ops.push_back(DAG.getConstant(SPDiff, MVT::i32));
+
+  // Add argument registers to the end of the list so that they are known live
+  // into the call.
+  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+                                  RegsToPass[i].second.getValueType()));
+
+  // When performing tail call optimization the callee pops its arguments off
+  // the stack. Account for this here so these bytes can be pushed back on in
+  // PPCRegisterInfo::eliminateCallFramePseudoInstr.
+  int BytesCalleePops =
+    (CC==CallingConv::Fast && PerformTailCallOpt) ? NumBytes : 0;
+
+  if (InFlag.getNode())
+    Ops.push_back(InFlag);
+
+  // Emit tail call.
+  if (isTailCall) {
+    assert(InFlag.getNode() &&
+           "Flag must be set. Depend on flag being set in LowerRET");
+    Chain = DAG.getNode(PPCISD::TAILCALL, dl,
+                        TheCall->getVTList(), &Ops[0], Ops.size());
+    return SDValue(Chain.getNode(), Op.getResNo());
+  }
+
+  Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
+  InFlag = Chain.getValue(1);
+
+  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
+                             DAG.getIntPtrConstant(BytesCalleePops, true),
+                             InFlag);
+  if (TheCall->getValueType(0) != MVT::Other)
+    InFlag = Chain.getValue(1);
+
+  SmallVector<SDValue, 16> ResultVals;
+  SmallVector<CCValAssign, 16> RVLocs;
+  unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv();
+  CCState CCRetInfo(CallerCC, isVarArg, TM, RVLocs);
+  CCRetInfo.AnalyzeCallResult(TheCall, RetCC_PPC);
+
+  // Copy all of the result registers out of their specified physreg.
+  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
+    CCValAssign &VA = RVLocs[i];
+    MVT VT = VA.getValVT();
+    assert(VA.isRegLoc() && "Can only return in registers!");
+    Chain = DAG.getCopyFromReg(Chain, dl,
+                               VA.getLocReg(), VT, InFlag).getValue(1);
+    ResultVals.push_back(Chain.getValue(0));
+    InFlag = Chain.getValue(2);
+  }
+
+  // If the function returns void, just return the chain.
+  if (RVLocs.empty())
+    return Chain;
+
+  // Otherwise, merge everything together with a MERGE_VALUES node.
+  ResultVals.push_back(Chain);
+  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
+                            &ResultVals[0], ResultVals.size());
+  return Res.getValue(Op.getResNo());
+}
+
 SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
                                      const PPCSubtarget &Subtarget,
                                      TargetMachine &TM) {
@@ -2170,7 +2814,8 @@ SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
   // Load the return address and frame pointer so they can be moved somewhere
   // else later.
   SDValue LROp, FPOp;
-  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
+  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
+                                       dl);
 
   // Set up a copy of the stack pointer for use loading and storing any
   // arguments that may not fit in the registers available for argument
@@ -2468,13 +3113,6 @@ SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
     InFlag = Chain.getValue(1);
   }
 
-  // With the ELF 32 ABI, set CR6 to true if this is a vararg call.
- if (isVarArg && isELF32_ABI) { - SDValue SetCR(DAG.getTargetNode(PPC::CRSET, dl, MVT::i32), 0); - Chain = DAG.getCopyToReg(Chain, dl, PPC::CR1EQ, SetCR, InFlag); - InFlag = Chain.getValue(1); - } - // Emit a sequence of copyto/copyfrom virtual registers for arguments that // might overwrite each other in case of tail call optimization. if (isTailCall) { @@ -3722,12 +4360,23 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget); case ISD::FORMAL_ARGUMENTS: - return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex, - VarArgsStackOffset, VarArgsNumGPR, - VarArgsNumFPR, PPCSubTarget); + if (PPCSubTarget.isELF32_ABI()) { + return LowerFORMAL_ARGUMENTS_SVR4(Op, DAG, VarArgsFrameIndex, + VarArgsStackOffset, VarArgsNumGPR, + VarArgsNumFPR, PPCSubTarget); + } else { + return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex, + VarArgsStackOffset, VarArgsNumGPR, + VarArgsNumFPR, PPCSubTarget); + } - case ISD::CALL: return LowerCALL(Op, DAG, PPCSubTarget, - getTargetMachine()); + case ISD::CALL: + if (PPCSubTarget.isELF32_ABI()) { + return LowerCALL_SVR4(Op, DAG, PPCSubTarget, getTargetMachine()); + } else { + return LowerCALL(Op, DAG, PPCSubTarget, getTargetMachine()); + } + case ISD::RET: return LowerRET(Op, DAG, getTargetMachine()); case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget); case ISD::DYNAMIC_STACKALLOC: @@ -4871,3 +5520,13 @@ PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { // The PowerPC target isn't yet aware of offsets. return false; } + +MVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align, + bool isSrcConst, bool isSrcStr, + SelectionDAG &DAG) const { + if (this->PPCSubTarget.isPPC64()) { + return MVT::i64; + } else { + return MVT::i32; + } +} diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h index 11c841c0d1a..803ffa1d923 100644 --- a/lib/Target/PowerPC/PPCISelLowering.h +++ b/lib/Target/PowerPC/PPCISelLowering.h @@ -335,6 +335,10 @@ namespace llvm { SelectionDAG &DAG) const; virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; + + virtual MVT getOptimalMemOpType(uint64_t Size, unsigned Align, + bool isSrcConst, bool isSrcStr, + SelectionDAG &DAG) const; /// getFunctionAlignment - Return the Log2 alignment of this function. 
virtual unsigned getFunctionAlignment(const Function *F) const; @@ -348,6 +352,7 @@ namespace llvm { SDValue Chain, SDValue &LROpOut, SDValue &FPOpOut, + bool isMachoABI, DebugLoc dl); SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG); @@ -365,6 +370,12 @@ namespace llvm { SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG, int VarArgsFrameIndex, int VarArgsStackOffset, unsigned VarArgsNumGPR, unsigned VarArgsNumFPR, const PPCSubtarget &Subtarget); + SDValue LowerFORMAL_ARGUMENTS_SVR4(SDValue Op, SelectionDAG &DAG, + int &VarArgsFrameIndex, + int &VarArgsStackOffset, + unsigned &VarArgsNumGPR, + unsigned &VarArgsNumFPR, + const PPCSubtarget &Subtarget); SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex, int &VarArgsStackOffset, @@ -373,6 +384,8 @@ namespace llvm { const PPCSubtarget &Subtarget); SDValue LowerCALL(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget, TargetMachine &TM); + SDValue LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG, + const PPCSubtarget &Subtarget, TargetMachine &TM); SDValue LowerRET(SDValue Op, SelectionDAG &DAG, TargetMachine &TM); SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget); diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td index 772e25ad232..58c81a2f76e 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.td +++ b/lib/Target/PowerPC/PPCInstrInfo.td @@ -442,8 +442,8 @@ let isCall = 1, PPC970_Unit = 7, // ELF ABI Calls. let isCall = 1, PPC970_Unit = 7, // All calls clobber the non-callee saved registers... - Defs = [R0,R2,R3,R4,R5,R6,R7,R8,R9,R10,R11,R12, - F0,F1,F2,F3,F4,F5,F6,F7,F8, + Defs = [R0,R3,R4,R5,R6,R7,R8,R9,R10,R11,R12, + F0,F1,F2,F3,F4,F5,F6,F7,F8,F9,F10,F11,F12,F13, V0,V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16,V17,V18,V19, LR,CTR, CR0,CR1,CR5,CR6,CR7, diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp index cb315062abf..fd052792ea1 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.cpp +++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -175,14 +175,12 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { }; static const unsigned ELF32_CalleeSavedRegs[] = { - PPC::R13, PPC::R14, PPC::R15, + PPC::R14, PPC::R15, PPC::R16, PPC::R17, PPC::R18, PPC::R19, PPC::R20, PPC::R21, PPC::R22, PPC::R23, PPC::R24, PPC::R25, PPC::R26, PPC::R27, PPC::R28, PPC::R29, PPC::R30, PPC::R31, - PPC::F9, - PPC::F10, PPC::F11, PPC::F12, PPC::F13, PPC::F14, PPC::F15, PPC::F16, PPC::F17, PPC::F18, PPC::F19, PPC::F20, PPC::F21, PPC::F22, PPC::F23, PPC::F24, PPC::F25, @@ -190,6 +188,9 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { PPC::F30, PPC::F31, PPC::CR2, PPC::CR3, PPC::CR4, + + PPC::VRSAVE, + PPC::V20, PPC::V21, PPC::V22, PPC::V23, PPC::V24, PPC::V25, PPC::V26, PPC::V27, PPC::V28, PPC::V29, PPC::V30, PPC::V31, @@ -267,14 +268,12 @@ PPCRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const { }; static const TargetRegisterClass * const ELF32_CalleeSavedRegClasses[] = { - &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass, + &PPC::GPRCRegClass,&PPC::GPRCRegClass, &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass, &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass, &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass, &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass, - &PPC::F8RCRegClass, - 
 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
 &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
@@ -283,6 +282,8 @@ PPCRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
 
   &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
 
+  &PPC::VRSAVERCRegClass,
+
   &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
   &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
   &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
@@ -358,10 +359,12 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   Reserved.set(PPC::LR8);
   Reserved.set(PPC::RM);
 
-  // In Linux, r2 is reserved for the OS.
-  if (!Subtarget.isDarwin())
-    Reserved.set(PPC::R2);
-
+  // The SVR4 ABI reserves r2 and r13.
+  if (Subtarget.isELF32_ABI()) {
+    Reserved.set(PPC::R2);  // System-reserved register
+    Reserved.set(PPC::R13); // Small Data Area pointer register
+  }
+
   // On PPC64, r13 is the thread pointer. Never allocate this register. Note
   // that this is over conservative, as it also prevents allocation of R31 when
   // the FP is not needed.
@@ -909,6 +912,7 @@ void PPCRegisterInfo::determineFrameLayout(MachineFunction &MF) const {
   // don't have a frame pointer, calls, or dynamic alloca then we do not need
   // to adjust the stack pointer (we fit in the Red Zone).
   bool DisableRedZone = MF.getFunction()->hasFnAttr(Attribute::NoRedZone);
+  // FIXME SVR4: The SVR4 ABI has no red zone.
  if (!DisableRedZone &&
       FrameSize <= 224 &&                          // Fits in red zone.
       !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
@@ -963,8 +967,7 @@ PPCRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
   MachineFrameInfo *MFI = MF.getFrameInfo();
 
   // If the frame pointer save index hasn't been defined yet.
-  if (!FPSI && (NoFramePointerElim || MFI->hasVarSizedObjects()) &&
-      IsELF32_ABI) {
+  if (!FPSI && needsFP(MF) && IsELF32_ABI) {
    // Find out what the fixed offset of the frame pointer save area is.
     int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64,
                                                            IsMachoABI);
@@ -976,11 +979,10 @@ PPCRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
   // Reserve stack space to move the linkage area to in case of a tail call.
   int TCSPDelta = 0;
-  if (PerformTailCallOpt && (TCSPDelta=FI->getTailCallSPDelta()) < 0) {
-    int AddFPOffsetAmount = IsELF32_ABI ? -4 : 0;
-    MF.getFrameInfo()->CreateFixedObject( -1 * TCSPDelta,
-                                          AddFPOffsetAmount + TCSPDelta);
+  if (PerformTailCallOpt && (TCSPDelta = FI->getTailCallSPDelta()) < 0) {
+    MF.getFrameInfo()->CreateFixedObject(-1 * TCSPDelta, TCSPDelta);
   }
+
   // Reserve a slot closest to SP or frame pointer if we have a dynalloc or
   // a large stack, which will require scavenging a register to materialize a
   // large offset.
@@ -998,6 +1000,169 @@ PPCRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
   }
 }
 
+void
+PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
+                                                     const {
+  // Early exit if not using the SVR4 ABI.
+  if (!Subtarget.isELF32_ABI()) {
+    return;
+  }
+
+  // Get callee saved register information.
+  MachineFrameInfo *FFI = MF.getFrameInfo();
+  const std::vector<CalleeSavedInfo> &CSI = FFI->getCalleeSavedInfo();
+
+  // Early exit if no callee saved registers are modified!
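+  // Everything below assigns final offsets top-down from the back chain
+  // word of the previous frame: FP save area, then GP save area, CR save
+  // word, VRSAVE word, and finally the 16-byte aligned vector save area.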
+  if (CSI.empty() && !needsFP(MF)) {
+    return;
+  }
+
+  unsigned MinGPR = PPC::R31;
+  unsigned MinFPR = PPC::F31;
+  unsigned MinVR = PPC::V31;
+
+  bool HasGPSaveArea = false;
+  bool HasFPSaveArea = false;
+  bool HasCRSaveArea = false;
+  bool HasVRSAVESaveArea = false;
+  bool HasVRSaveArea = false;
+
+  SmallVector<CalleeSavedInfo, 18> GPRegs;
+  SmallVector<CalleeSavedInfo, 18> FPRegs;
+  SmallVector<CalleeSavedInfo, 12> VRegs;
+
+  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+    unsigned Reg = CSI[i].getReg();
+    const TargetRegisterClass *RC = CSI[i].getRegClass();
+
+    if (RC == PPC::GPRCRegisterClass) {
+      HasGPSaveArea = true;
+
+      GPRegs.push_back(CSI[i]);
+
+      if (Reg < MinGPR) {
+        MinGPR = Reg;
+      }
+    } else if (RC == PPC::F8RCRegisterClass) {
+      HasFPSaveArea = true;
+
+      FPRegs.push_back(CSI[i]);
+
+      if (Reg < MinFPR) {
+        MinFPR = Reg;
+      }
+    } else if (   RC == PPC::CRBITRCRegisterClass
+               || RC == PPC::CRRCRegisterClass) {
+      HasCRSaveArea = true;
+    } else if (RC == PPC::VRSAVERCRegisterClass) {
+      HasVRSAVESaveArea = true;
+    } else if (RC == PPC::VRRCRegisterClass) {
+      HasVRSaveArea = true;
+
+      VRegs.push_back(CSI[i]);
+
+      if (Reg < MinVR) {
+        MinVR = Reg;
+      }
+    } else {
+      assert(0 && "Unknown RegisterClass!");
+    }
+  }
+
+  PPCFunctionInfo *PFI = MF.getInfo<PPCFunctionInfo>();
+
+  int64_t LowerBound = 0;
+
+  // Take into account stack space reserved for tail calls.
+  int TCSPDelta = 0;
+  if (PerformTailCallOpt && (TCSPDelta = PFI->getTailCallSPDelta()) < 0) {
+    LowerBound = TCSPDelta;
+  }
+
+  // The Floating-point register save area is right below the back chain word
+  // of the previous stack frame.
+  if (HasFPSaveArea) {
+    for (unsigned i = 0, e = FPRegs.size(); i != e; ++i) {
+      int FI = FPRegs[i].getFrameIdx();
+
+      FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+    }
+
+    LowerBound -= (31 - getRegisterNumbering(MinFPR) + 1) * 8;
+  }
+
+  // Check whether the frame pointer register is allocated. If so, make sure it
+  // is spilled to the correct offset.
+  if (needsFP(MF)) {
+    HasGPSaveArea = true;
+
+    int FI = PFI->getFramePointerSaveIndex();
+    assert(FI && "No Frame Pointer Save Slot!");
+
+    FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+  }
+
+  // The General register save area starts right below the Floating-point
+  // register save area.
+  if (HasGPSaveArea) {
+    // Move general register save area spill slots down, taking into account
+    // the size of the Floating-point register save area.
+    for (unsigned i = 0, e = GPRegs.size(); i != e; ++i) {
+      int FI = GPRegs[i].getFrameIdx();
+
+      FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+    }
+
+    LowerBound -= (31 - getRegisterNumbering(MinGPR) + 1) * 4;
+  }
+
+  // The CR save area is below the general register save area.
+  if (HasCRSaveArea) {
+    // FIXME SVR4: Is it actually possible to have multiple elements in CSI
+    //             which have the CR/CRBIT register class?
+    // Adjust the frame index of the CR spill slot.
+    for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+      const TargetRegisterClass *RC = CSI[i].getRegClass();
+
+      if (RC == PPC::CRBITRCRegisterClass || RC == PPC::CRRCRegisterClass) {
+        int FI = CSI[i].getFrameIdx();
+
+        FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+      }
+    }
+
+    LowerBound -= 4; // The CR save area is always 4 bytes long.
+  }
+
+  if (HasVRSAVESaveArea) {
+    // FIXME SVR4: Is it actually possible to have multiple elements in CSI
+    //             which have the VRSAVE register class?
+    // Adjust the frame index of the VRSAVE spill slot.
+    for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+      const TargetRegisterClass *RC = CSI[i].getRegClass();
+
+      if (RC == PPC::VRSAVERCRegisterClass) {
+        int FI = CSI[i].getFrameIdx();
+
+        FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+      }
+    }
+
+    LowerBound -= 4; // The VRSAVE save area is always 4 bytes long.
+  }
+
+  if (HasVRSaveArea) {
+    // Insert alignment padding; we need 16-byte alignment.
+    LowerBound = (LowerBound - 15) & ~(15);
+
+    for (unsigned i = 0, e = VRegs.size(); i != e; ++i) {
+      int FI = VRegs[i].getFrameIdx();
+
+      FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
+    }
+  }
+}
+
 void PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
   MachineBasicBlock &MBB = MF.front();   // Prolog goes in entry BB
@@ -1041,7 +1206,18 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
   bool HasFP = hasFP(MF) && FrameSize;
 
   int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, IsMachoABI);
-  int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, IsMachoABI);
+
+  int FPOffset = 0;
+  if (HasFP) {
+    if (Subtarget.isELF32_ABI()) {
+      MachineFrameInfo *FFI = MF.getFrameInfo();
+      int FPIndex = FI->getFramePointerSaveIndex();
+      assert(FPIndex && "No Frame Pointer Save Slot!");
+      FPOffset = FFI->getObjectOffset(FPIndex);
+    } else {
+      FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, IsMachoABI);
+    }
+  }
 
   if (IsPPC64) {
     if (MustSaveLR)
@@ -1250,7 +1426,18 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
   bool HasFP = hasFP(MF) && FrameSize;
 
   int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, IsMachoABI);
-  int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, IsMachoABI);
+
+  int FPOffset = 0;
+  if (HasFP) {
+    if (Subtarget.isELF32_ABI()) {
+      MachineFrameInfo *FFI = MF.getFrameInfo();
+      int FPIndex = FI->getFramePointerSaveIndex();
+      assert(FPIndex && "No Frame Pointer Save Slot!");
+      FPOffset = FFI->getObjectOffset(FPIndex);
+    } else {
+      FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, IsMachoABI);
+    }
+  }
 
   bool UsesTCRet = RetOpcode == PPC::TCRETURNri ||
                    RetOpcode == PPC::TCRETURNdi ||
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.h b/lib/Target/PowerPC/PPCRegisterInfo.h
index 9506b651c5b..ddaefdd2a37 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -75,6 +75,8 @@ public:
   void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                             RegScavenger *RS = NULL) const;
 
+  void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
+
   void emitPrologue(MachineFunction &MF) const;
   void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
 
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.td b/lib/Target/PowerPC/PPCRegisterInfo.td
index 9e15a55781c..79d41d362f6 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -290,7 +290,12 @@ def GPRC : RegisterClass<"PPC", [i32], 32,
     // On PPC64, r13 is the thread pointer. Never allocate this register.
     // Note that this is overconservative, as it also prevents allocation of
     // R31 when the FP is not needed.
-    if (MF.getTarget().getSubtarget<PPCSubtarget>().isPPC64())
+    // When using the SVR4 ABI, r13 is reserved for the Small Data Area
+    // pointer.
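+    // Excluding R13 costs one allocatable GPR under SVR4, but it lets the
+    // PPC64 and SVR4 cases share the same end()-5 cut-off below.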
+    const PPCSubtarget &Subtarget
+      = MF.getTarget().getSubtarget<PPCSubtarget>();
+
+    if (Subtarget.isPPC64() || Subtarget.isELF32_ABI())
       return end()-5;  // don't allocate R13, R31, R0, R1, LR
 
     if (needsFP(MF))
@@ -324,19 +329,24 @@ def G8RC : RegisterClass<"PPC", [i64], 64,
   }];
 }
 
-
-
+// Allocate volatiles first, then non-volatiles in reverse order. With the
+// SVR4 ABI the size of the Floating-point register save area is determined by
+// the allocated non-volatile register with the lowest register number, as FP
+// register N is spilled to offset 8 * (32 - N) below the back chain word of
+// the previous stack frame. By allocating non-volatiles in reverse order we
+// make sure that the Floating-point register save area is always as small as
+// possible because there aren't any unused spill slots.
 def F8RC : RegisterClass<"PPC", [f64], 64, [F0, F1, F2, F3, F4, F5, F6, F7,
-  F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21,
-  F22, F23, F24, F25, F26, F27, F28, F29, F30, F31]>;
+  F8, F9, F10, F11, F12, F13, F31, F30, F29, F28, F27, F26, F25, F24, F23,
+  F22, F21, F20, F19, F18, F17, F16, F15, F14]>;
 def F4RC : RegisterClass<"PPC", [f32], 32, [F0, F1, F2, F3, F4, F5, F6, F7,
-  F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21,
-  F22, F23, F24, F25, F26, F27, F28, F29, F30, F31]>;
+  F8, F9, F10, F11, F12, F13, F31, F30, F29, F28, F27, F26, F25, F24, F23,
+  F22, F21, F20, F19, F18, F17, F16, F15, F14]>;
 
 def VRRC : RegisterClass<"PPC", [v16i8,v8i16,v4i32,v4f32], 128,
  [V2, V3, V4, V5, V0, V1,
-  V6, V7, V8, V9, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V20, V21,
-  V22, V23, V24, V25, V26, V27, V28, V29, V30, V31]>;
+  V6, V7, V8, V9, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V31, V30,
+  V29, V28, V27, V26, V25, V24, V23, V22, V21, V20]>;
 
 def CRRC : RegisterClass<"PPC", [i32], 32, [CR0, CR1, CR5, CR6, CR7, CR2,
                                             CR3, CR4]>;
@@ -358,3 +368,5 @@ def CRBITRC : RegisterClass<"PPC", [i32], 32,
 
 def CTRRC : RegisterClass<"PPC", [i32], 32, [CTR]>;
 def CTRRC8 : RegisterClass<"PPC", [i64], 64, [CTR8]>;
+
+def VRSAVERC : RegisterClass<"PPC", [i32], 32, [VRSAVE]>;
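+
+// As a worked example of the F8RC ordering above: a function that needs only
+// two non-volatile FPRs receives F31 and F30, so the FP save area spans just
+// 8 * (32 - 30) = 16 bytes below the back chain word; handing out F14 and F15
+// instead would have forced an 8 * (32 - 14) = 144 byte save area.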