From e3ef744d3e3557ee286d2ed3bf8306e5fbbd1ac4 Mon Sep 17 00:00:00 2001
From: Dale Johannesen
Date: Sat, 10 Nov 2007 22:07:15 +0000
Subject: [PATCH] Add CCAssignToStackABISizeAlign for convenience in dealing
 with types whose size & alignment are different on different subtargets.
 Use it for x86 f80.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@43988 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/TargetCallingConv.td       |  7 +++++++
 lib/Target/X86/X86CallingConv.td      | 13 ++++++++-----
 utils/TableGen/CallingConvEmitter.cpp |  9 +++++++++
 3 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/lib/Target/TargetCallingConv.td b/lib/Target/TargetCallingConv.td
index 176a848ec02..05029a01a6d 100644
--- a/lib/Target/TargetCallingConv.td
+++ b/lib/Target/TargetCallingConv.td
@@ -66,6 +66,13 @@ class CCAssignToStack<int size, int align> : CCAction {
   int Align = align;
 }
 
+/// CCAssignToStackABISizeAlign - This action always matches: it assigns
+/// the value to a stack slot of the ABISize and ABIAlignment for the type,
+/// which may depend on the target or subtarget.
+/// "ignored" is here because an empty arg list does not work.
+class CCAssignToStackABISizeAlign<int ignored> : CCAction {
+}
+
 /// CCStructAssign - This action always matches: it will use the C ABI and
 /// the register availability to decided whether to assign to a set of
 /// registers or to a stack slot.
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index 5c8f75e495a..97d4636b814 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -118,10 +118,12 @@ def CC_X86_64_C : CallingConv<[
   // 8-byte aligned if there are no more registers to hold them.
   CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,
 
-  // Long doubles get 16-byte stack slots that are 16-byte aligned.
+  // Long doubles get stack slots whose size and alignment depends on the
+  // subtarget.
+  CCIfType<[f80], CCAssignToStackABISizeAlign<0>>,
+
   // Vectors get 16-byte stack slots that are 16-byte aligned.
-  CCIfType<[f80, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
-           CCAssignToStack<16, 16>>,
+  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
 
   // __m64 vectors get 8-byte stack slots that are 8-byte aligned.
   CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>>
@@ -177,8 +179,9 @@ def CC_X86_32_Common : CallingConv<[
   // Doubles get 8-byte slots that are 4-byte aligned.
   CCIfType<[f64], CCAssignToStack<8, 4>>,
 
-  // Long doubles get 16-byte slots that are 4-byte aligned.
-  CCIfType<[f80], CCAssignToStack<16, 4>>,
+  // Long doubles get slots whose size and alignment depends on the
+  // subtarget.
+  CCIfType<[f80], CCAssignToStackABISizeAlign<0>>,
 
   // The first 4 vector arguments are passed in XMM registers.
   CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
diff --git a/utils/TableGen/CallingConvEmitter.cpp b/utils/TableGen/CallingConvEmitter.cpp
index 5c88b5fc7a4..79e3a9c6cef 100644
--- a/utils/TableGen/CallingConvEmitter.cpp
+++ b/utils/TableGen/CallingConvEmitter.cpp
@@ -120,6 +120,15 @@ void CallingConvEmitter::EmitAction(Record *Action,
       O << IndentStr << "State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset"
         << Counter << ", LocVT, LocInfo));\n";
       O << IndentStr << "return false;\n";
+    } else if (Action->isSubClassOf("CCAssignToStackABISizeAlign")) {
+      O << IndentStr << "unsigned Offset" << ++Counter
+        << " = State.AllocateStack(State.getTarget().getTargetData()"
+           "->getABITypeSize(MVT::getTypeForValueType(LocVT)),\n";
+      O << IndentStr << " State.getTarget().getTargetData()"
+           "->getABITypeAlignment(MVT::getTypeForValueType(LocVT)));\n";
+      O << IndentStr << "State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset"
+        << Counter << ", LocVT, LocInfo));\n";
+      O << IndentStr << "return false;\n";
     } else if (Action->isSubClassOf("CCPromoteToType")) {
       Record *DestTy = Action->getValueAsDef("DestTy");
       O << IndentStr << "LocVT = " << getEnumName(getValueType(DestTy)) <<";\n";
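
Not part of the patch: a minimal standalone C++ sketch that simply replays the
string-emission statements added to CallingConvEmitter.cpp above, to show the
fragment TableGen would splice into a generated CC_* function when a .td file
uses CCAssignToStackABISizeAlign. The main() harness, the IndentStr value, and
the starting Counter value are assumptions; the State / getTargetData /
getABITypeSize names occur only inside string literals and are emitted verbatim
into the generated code.

    // sketch.cpp - replay of the CCAssignToStackABISizeAlign emitter branch
    #include <iostream>
    #include <string>

    int main() {
      std::ostream &O = std::cout;        // the emitter writes to a std::ostream
      std::string IndentStr = "      ";   // assumed indent; derived from Indent in the real emitter
      unsigned Counter = 0;               // assumed starting value of the offset counter

      // Statements copied from the branch added in the patch.
      O << IndentStr << "unsigned Offset" << ++Counter
        << " = State.AllocateStack(State.getTarget().getTargetData()"
           "->getABITypeSize(MVT::getTypeForValueType(LocVT)),\n";
      O << IndentStr << " State.getTarget().getTargetData()"
           "->getABITypeAlignment(MVT::getTypeForValueType(LocVT)));\n";
      O << IndentStr << "State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset"
        << Counter << ", LocVT, LocInfo));\n";
      O << IndentStr << "return false;\n";
      return 0;
    }

Compiled and run, it prints roughly:

      unsigned Offset1 = State.AllocateStack(State.getTarget().getTargetData()->getABITypeSize(MVT::getTypeForValueType(LocVT)),
       State.getTarget().getTargetData()->getABITypeAlignment(MVT::getTypeForValueType(LocVT)));
      State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset1, LocVT, LocInfo));
      return false;

i.e. the generated calling-convention code takes the f80 slot's size and
alignment from the subtarget's TargetData instead of hard-coding 16/16
(CC_X86_64_C) or 16/4 (CC_X86_32_Common) as before.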