mirror of https://github.com/c64scene-ar/llvm-6502.git
Formalize the notion that AVX and SSE are non-overlapping extensions from the compiler's point of view. Per email discussion, we either want to always use VEX-prefixed instructions or never use them, and are taking "HasAVX" to mean "Always use VEX". Passing -mattr=-avx,+sse42 should serve to restore legacy SSE support when desirable.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@121439 91177308-0d34-0410-b5e6-96231b3b80d8
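The hunks below (from lib/Target/X86/X86CallingConv.td) only rename the predicate calls; the predicates themselves live on X86Subtarget and are not part of this diff. As a rough, self-contained C++ sketch of the intended semantics — the predicate bodies here are an assumed reading of the commit message, not code from this commit — AVX is folded into the XMM-level checks:

#include <cassert>

// Hypothetical model of the relevant X86 feature bits; in LLVM they are
// set from the CPU string and -mattr flags. The predicate bodies are an
// assumption for illustration, not code from this diff.
struct X86FeatureModel {
  bool HasSSE1 = false, HasSSE2 = false, HasAVX = false;

  // hasXMM(): XMM registers usable for f32/packed-float work, whether the
  // instructions are encoded with legacy SSE prefixes or with VEX (AVX).
  bool hasXMM() const { return HasSSE1 || HasAVX; }

  // hasXMMInt(): SSE2-level XMM support (f64, integer vectors) or AVX.
  bool hasXMMInt() const { return HasSSE2 || HasAVX; }
};

int main() {
  // -mattr=-avx,+sse42 from the message above: AVX off, SSE4.2 on
  // (which implies the lower SSE levels), i.e. legacy SSE restored.
  X86FeatureModel LegacySSE;
  LegacySSE.HasSSE1 = LegacySSE.HasSSE2 = true;
  assert(LegacySSE.hasXMM() && LegacySSE.hasXMMInt() && !LegacySSE.HasAVX);

  // "HasAVX" taken to mean "always use VEX": the XMM predicates still
  // hold, so the calling-convention entries below behave identically.
  X86FeatureModel AlwaysVEX;
  AlwaysVEX.HasAVX = true;
  assert(AlwaysVEX.hasXMM() && AlwaysVEX.hasXMMInt());
}

Under this model, -mattr=-avx,+sse42 clears HasAVX while keeping the SSE feature bits set, which is exactly the "legacy SSE" configuration the message describes.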
lib/Target/X86/X86CallingConv.td

@@ -61,7 +61,7 @@ def RetCC_X86_32_C : CallingConv<[
   // weirdly; this is really the sse-regparm calling convention) in which
   // case they use XMM0, otherwise it is the same as the common X86 calling
   // conv.
-  CCIfInReg<CCIfSubtarget<"hasSSE2()",
+  CCIfInReg<CCIfSubtarget<"hasXMMInt()",
     CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
   CCIfType<[f32,f64], CCAssignToReg<[ST0, ST1]>>,
   CCDelegateTo<RetCC_X86Common>
@@ -73,8 +73,8 @@ def RetCC_X86_32_Fast : CallingConv<[
   // SSE2.
   // This can happen when a float, 2 x float, or 3 x float vector is split by
   // target lowering, and is returned in 1-3 sse regs.
-  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
-  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
+  CCIfType<[f32], CCIfSubtarget<"hasXMMInt()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
+  CCIfType<[f64], CCIfSubtarget<"hasXMMInt()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,

   // For integers, ECX can be used as an extra return register
   CCIfType<[i8], CCAssignToReg<[AL, DL, CL]>>,
@@ -163,12 +163,12 @@ def CC_X86_64_C : CallingConv<[
   // registers on Darwin.
   CCIfType<[x86mmx],
             CCIfSubtarget<"isTargetDarwin()",
-            CCIfSubtarget<"hasSSE2()",
+            CCIfSubtarget<"hasXMMInt()",
             CCPromoteToType<v2i64>>>>,

   // The first 8 FP/Vector arguments are passed in XMM registers.
   CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
-            CCIfSubtarget<"hasSSE1()",
+            CCIfSubtarget<"hasXMM()",
             CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,

   // The first 8 256-bit vector arguments are passed in YMM registers.
@@ -245,7 +245,7 @@ def CC_X86_64_GHC : CallingConv<[

   // Pass in STG registers: F1, F2, F3, F4, D1, D2
   CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
-            CCIfSubtarget<"hasSSE1()",
+            CCIfSubtarget<"hasXMM()",
             CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
 ]>;

@@ -263,7 +263,7 @@ def CC_X86_32_Common : CallingConv<[
   // The first 3 float or double arguments, if marked 'inreg' and if the call
   // is not a vararg call and if SSE2 is available, are passed in SSE registers.
   CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
-                CCIfSubtarget<"hasSSE2()",
+                CCIfSubtarget<"hasXMMInt()",
                 CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,

   // The first 3 __m64 (except for v1i64) vector arguments are passed in mmx
@@ -362,7 +362,7 @@ def CC_X86_32_FastCC : CallingConv<[
   // The first 3 float or double arguments, if the call is not a vararg
   // call and if SSE2 is available, are passed in SSE registers.
   CCIfNotVarArg<CCIfType<[f32,f64],
-                CCIfSubtarget<"hasSSE2()",
+                CCIfSubtarget<"hasXMMInt()",
                 CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,

   // Doubles get 8-byte slots that are 8-byte aligned.
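For readers unfamiliar with the .td syntax: each CCIfSubtarget<"pred()", action> entry is expanded by TableGen into generated C++ (X86GenCallingConv.inc) that calls the named predicate on the X86 subtarget and, if it holds, applies the action. The following is a hand-written, self-contained approximation of that generated shape; the mini Subtarget and CCState types and the classifyReturn name are stand-ins for illustration, not LLVM's real classes:

#include <cstdio>

// Stand-in for the X86 subtarget feature bits (illustration only).
struct Subtarget {
  bool HasSSE2 = false, HasAVX = false;
  // SSE2-level XMM support via either legacy SSE or VEX encodings.
  bool hasXMMInt() const { return HasSSE2 || HasAVX; }
};

// Minimal stand-in for LLVM's CCState register allocator.
struct CCState {
  bool Used[3] = {false, false, false};
  // Returns the 1-based index of the first free register, or 0 if none.
  unsigned AllocateReg(unsigned N) {
    for (unsigned i = 0; i != N; ++i)
      if (!Used[i]) { Used[i] = true; return i + 1; }
    return 0;
  }
};

// Approximates what TableGen might emit for the entry
//   CCIfInReg<CCIfSubtarget<"hasXMMInt()",
//     CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>
bool classifyReturn(bool InReg, bool IsF32OrF64,
                    const Subtarget &ST, CCState &State) {
  static const char *const XMMRegs[] = {"XMM0", "XMM1", "XMM2"};
  if (InReg && ST.hasXMMInt() && IsF32OrF64)
    if (unsigned Reg = State.AllocateReg(3)) {
      std::printf("assigned %s\n", XMMRegs[Reg - 1]);
      return true;   // value placed; stop matching further entries
    }
  return false;      // fall through to the ST0/ST1 entries
}

int main() {
  CCState State;
  Subtarget NoSSE;                         // plain x87: falls through
  classifyReturn(true, true, NoSSE, State);
  Subtarget Avx;
  Avx.HasAVX = true;                       // AVX alone satisfies hasXMMInt()
  classifyReturn(true, true, Avx, State);  // prints "assigned XMM0"
}

This is why the rename matters: with the old "hasSSE2()" predicate an AVX-only subtarget (HasAVX set, SSE bits clear) would have fallen through to the x87 path, while "hasXMMInt()" keeps the XMM assignment for both encoding families.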