Teach x86 target -soft-float.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@64496 91177308-0d34-0410-b5e6-96231b3b80d8
Evan Cheng 2009-02-13 22:36:38 +00:00
parent 03ce042d70
commit c7ce29b04f
2 changed files with 66 additions and 30 deletions
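
For context: -soft-float tells code generation to avoid hardware floating point entirely and lower FP operations to runtime library calls instead. The patch threads the existing UseSoftFloat flag through the X86 lowering setup so that no FP (and, as a side effect, no MMX/SSE vector) register classes are registered when the flag is set. As a rough sketch of how a codegen flag like this is declared with LLVM's CommandLine library (only the variable name UseSoftFloat and the "soft-float" spelling come from this commit; the surrounding declaration is illustrative, not the tree's actual code):

    // Illustrative sketch only: roughly how a -soft-float style flag can be
    // declared with LLVM's cl::opt machinery, using external storage so other
    // files can test the boolean directly.
    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    bool UseSoftFloat;
    static cl::opt<bool, true>
    SoftFloatOpt("soft-float",
                 cl::desc("Generate software floating point library calls"),
                 cl::location(UseSoftFloat), cl::init(false));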

lib/Target/X86/X86ISelLowering.cpp

@@ -375,7 +375,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   else
     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
 
-  if (X86ScalarSSEf64) {
+  if (!UseSoftFloat && X86ScalarSSEf64) {
     // f32 and f64 use SSE.
     // Set up the FP register classes.
     addRegisterClass(MVT::f32, X86::FR32RegisterClass);
@@ -413,7 +413,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
       setConvertAction(MVT::f80, MVT::f32, Expand);
       setConvertAction(MVT::f80, MVT::f64, Expand);
     }
-  } else if (X86ScalarSSEf32) {
+  } else if (!UseSoftFloat && X86ScalarSSEf32) {
     // Use SSE for f32, x87 for f64.
     // Set up the FP register classes.
     addRegisterClass(MVT::f32, X86::FR32RegisterClass);
@@ -458,7 +458,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
       setOperationAction(ISD::FSIN , MVT::f64 , Expand);
       setOperationAction(ISD::FCOS , MVT::f64 , Expand);
     }
-  } else {
+  } else if (!UseSoftFloat) {
     // f32 and f64 in x87.
     // Set up the FP register classes.
     addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
@@ -493,28 +493,30 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   }
 
   // Long double always uses X87.
-  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
-  setOperationAction(ISD::UNDEF, MVT::f80, Expand);
-  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
-  {
-    bool ignored;
-    APFloat TmpFlt(+0.0);
-    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
-                   &ignored);
-    addLegalFPImmediate(TmpFlt);  // FLD0
-    TmpFlt.changeSign();
-    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
-    APFloat TmpFlt2(+1.0);
-    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
-                    &ignored);
-    addLegalFPImmediate(TmpFlt2);  // FLD1
-    TmpFlt2.changeSign();
-    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
-  }
+  if (!UseSoftFloat) {
+    addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
+    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
+    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
+    {
+      bool ignored;
+      APFloat TmpFlt(+0.0);
+      TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
+                     &ignored);
+      addLegalFPImmediate(TmpFlt);  // FLD0
+      TmpFlt.changeSign();
+      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
+      APFloat TmpFlt2(+1.0);
+      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
+                      &ignored);
+      addLegalFPImmediate(TmpFlt2);  // FLD1
+      TmpFlt2.changeSign();
+      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
+    }
 
-  if (!UnsafeFPMath) {
-    setOperationAction(ISD::FSIN , MVT::f80 , Expand);
-    setOperationAction(ISD::FCOS , MVT::f80 , Expand);
+    if (!UnsafeFPMath) {
+      setOperationAction(ISD::FSIN , MVT::f80 , Expand);
+      setOperationAction(ISD::FCOS , MVT::f80 , Expand);
+    }
   }
 
   // Always use a library call for pow.
@@ -578,7 +580,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand);
   }
 
-  if (!DisableMMX && Subtarget->hasMMX()) {
+  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
+  // with -msoft-float, disable use of MMX as well.
+  if (!UseSoftFloat && !DisableMMX && Subtarget->hasMMX()) {
     addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
     addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
     addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
@@ -660,7 +664,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     setOperationAction(ISD::SELECT, MVT::v1i64, Custom);
   }
 
-  if (Subtarget->hasSSE1()) {
+  if (!UseSoftFloat && Subtarget->hasSSE1()) {
     addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
 
     setOperationAction(ISD::FADD, MVT::v4f32, Legal);
@@ -677,8 +681,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     setOperationAction(ISD::VSETCC, MVT::v4f32, Custom);
   }
 
-  if (Subtarget->hasSSE2()) {
+  if (!UseSoftFloat && Subtarget->hasSSE2()) {
     addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
+
+    // FIXME: Unfortunately -soft-float means XMM registers cannot be used even
+    // for integer operations.
     addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
     addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
     addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
@@ -756,7 +763,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
   }
 
-  if (Subtarget->hasSSE41()) {
+  if (!UseSoftFloat && Subtarget->hasSSE41()) {
     // FIXME: Do we need to handle scalar-to-vector here?
     setOperationAction(ISD::MUL, MVT::v4i32, Legal);
@@ -1399,9 +1406,11 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
       unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs,
                                                        TotalNumXMMRegs);
 
-      assert((Subtarget->hasSSE1() || !NumXMMRegs) &&
+      assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
              "SSE register cannot be used when SSE is disabled!");
-      if (!Subtarget->hasSSE1()) {
+      assert(!(NumXMMRegs && UseSoftFloat) &&
+             "SSE register cannot be used when SSE is disabled!");
+      if (UseSoftFloat || !Subtarget->hasSSE1()) {
         // Kernel mode asks for SSE to be disabled, so don't push them
         // on the stack.
         TotalNumXMMRegs = 0;
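
Two things are worth noting about the hunks above. First, every guard follows the same pattern: when UseSoftFloat is set, addRegisterClass is skipped, so the affected types are never legal and the legalizer falls back to runtime library calls for them. Second, the rewrite of the first assert in LowerFORMAL_ARGUMENTS does not change its meaning: (B || !A) and !(A && !B) are equivalent by De Morgan's laws, and the new form merely mirrors the shape of the soft-float assert added next to it. A standalone check of that equivalence, where a and b stand in for NumXMMRegs != 0 and Subtarget->hasSSE1():

    #include <cassert>

    int main() {
      // Exhaustively verify !(a && !b) == (b || !a) over all boolean inputs.
      for (int a = 0; a <= 1; ++a)
        for (int b = 0; b <= 1; ++b)
          assert((!(a && !b)) == (b || !a));
      return 0;
    }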

test/CodeGen/X86/soft-fp.ll (new file)

@@ -0,0 +1,27 @@
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 -soft-float | not grep xmm
+; RUN: llvm-as < %s | llc -march=x86-64 -mattr=+sse2 -soft-float | not grep xmm
+
+%struct.__va_list_tag = type { i32, i32, i8*, i8* }
+
+define i32 @t1(i32 %a, ...) nounwind {
+entry:
+  %va = alloca [1 x %struct.__va_list_tag], align 8    ; <[1 x %struct.__va_list_tag]*> [#uses=2]
+  %va12 = bitcast [1 x %struct.__va_list_tag]* %va to i8*    ; <i8*> [#uses=2]
+  call void @llvm.va_start(i8* %va12)
+  %va3 = getelementptr [1 x %struct.__va_list_tag]* %va, i64 0, i64 0    ; <%struct.__va_list_tag*> [#uses=1]
+  call void @bar(%struct.__va_list_tag* %va3) nounwind
+  call void @llvm.va_end(i8* %va12)
+  ret i32 undef
+}
+
+declare void @llvm.va_start(i8*) nounwind
+
+declare void @bar(%struct.__va_list_tag*)
+
+declare void @llvm.va_end(i8*) nounwind
+
+define float @t2(float %a, float %b) nounwind readnone {
+entry:
+  %0 = add float %a, %b    ; <float> [#uses=1]
+  ret float %0
+}
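
Both RUN lines pipe the generated assembly through `not grep xmm`, so the test fails if any XMM register shows up even though +sse2 and -soft-float are requested together. @t1 exercises the varargs path changed in LowerFORMAL_ARGUMENTS (no XMM spills for the variadic register save area), and @t2 exercises plain scalar FP arithmetic, which under soft-float should become a runtime library call (typically __addsf3) rather than an SSE instruction.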