diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 2f14d790742..380017feff6 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1038,6 +1038,8 @@ void X86TargetLowering::resetOperationActions() {
     setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
     setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
+
+    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
   }
 
   if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
@@ -14091,6 +14093,25 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
                             SelectionDAG &DAG) {
   MVT SrcVT = Op.getOperand(0).getSimpleValueType();
   MVT DstVT = Op.getSimpleValueType();
+
+  if (SrcVT == MVT::v2i32) {
+    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+    if (DstVT != MVT::f64)
+      // This conversion needs to be expanded.
+      return SDValue();
+
+    SDLoc dl(Op);
+    SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                               Op->getOperand(0), DAG.getIntPtrConstant(0));
+    SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                               Op->getOperand(0), DAG.getIntPtrConstant(1));
+    SDValue Elts[] = {Elt0, Elt1, Elt0, Elt0};
+    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Elts);
+    SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
+    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
+                       DAG.getIntPtrConstant(0));
+  }
+
   assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
          Subtarget->hasMMX() && "Unexpected custom BITCAST");
   assert((DstVT == MVT::i64 ||
@@ -14546,8 +14567,27 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
     ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc);
     return;
   }
-  case ISD::ATOMIC_LOAD:
+  case ISD::ATOMIC_LOAD: {
     ReplaceATOMIC_LOAD(N, Results, DAG);
+    return;
+  }
+  case ISD::BITCAST: {
+    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+    EVT DstVT = N->getValueType(0);
+    EVT SrcVT = N->getOperand(0)->getValueType(0);
+
+    if (SrcVT == MVT::f64 && DstVT == MVT::v2i32) {
+      SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
+                                     MVT::v2f64, N->getOperand(0));
+      SDValue ToV4I32 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Expanded);
+      SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                                 ToV4I32, DAG.getIntPtrConstant(0));
+      SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                                 ToV4I32, DAG.getIntPtrConstant(1));
+      SDValue Elts[] = {Elt0, Elt1};
+      Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Elts));
+    }
+  }
   }
 }
 
diff --git a/test/CodeGen/X86/lower-bitcast-v2i32.ll b/test/CodeGen/X86/lower-bitcast-v2i32.ll
new file mode 100644
index 00000000000..1c0de630ef8
--- /dev/null
+++ b/test/CodeGen/X86/lower-bitcast-v2i32.ll
@@ -0,0 +1,80 @@
+; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s
+
+
+define double @test1(double %A) {
+  %1 = bitcast double %A to <2 x i32>
+  %add = add <2 x i32> %1, <i32 3, i32 5>
+  %2 = bitcast <2 x i32> %add to double
+  ret double %2
+}
+; FIXME: Ideally we should be able to fold the entire body of @test1 into a
+; single paddd instruction. At the moment we produce the sequence
+; pshufd+paddq+pshufd.
+
+; CHECK-LABEL: test1
+; CHECK-NOT: movsd
+; CHECK: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define double @test2(double %A, double %B) {
+  %1 = bitcast double %A to <2 x i32>
+  %2 = bitcast double %B to <2 x i32>
+  %add = add <2 x i32> %1, %2
+  %3 = bitcast <2 x i32> %add to double
+  ret double %3
+}
+; FIXME: Ideally we should be able to fold the entire body of @test2 into a
+; single 'paddd %xmm1, %xmm0' instruction. At the moment we produce the
+; sequence pshufd+pshufd+paddq+pshufd.
+
+; CHECK-LABEL: test2
+; CHECK-NOT: movsd
+; CHECK: pshufd
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK-NEXT: ret
+
+
+define i64 @test3(i64 %A) {
+  %1 = bitcast i64 %A to <2 x float>
+  %add = fadd <2 x float> %1, <float 1.0, float 1.0>
+  %2 = bitcast <2 x float> %add to i64
+  ret i64 %2
+}
+; CHECK-LABEL: test3
+; CHECK-NOT: pshufd
+; CHECK: addps
+; CHECK-NOT: pshufd
+; CHECK: ret
+
+
+define i64 @test4(i64 %A) {
+  %1 = bitcast i64 %A to <2 x i32>
+  %add = add <2 x i32> %1, <i32 1, i32 1>
+  %2 = bitcast <2 x i32> %add to i64
+  ret i64 %2
+}
+; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
+; Ideally, we should fold that sequence into a single paddd.
+
+; CHECK-LABEL: test4
+; CHECK: pshufd
+; CHECK-NEXT: paddq
+; CHECK-NEXT: pshufd
+; CHECK: ret
+
+
+define double @test5(double %A) {
+  %1 = bitcast double %A to <2 x float>
+  %add = fadd <2 x float> %1, <float 1.0, float 1.0>
+  %2 = bitcast <2 x float> %add to double
+  ret double %2
+}
+; CHECK-LABEL: test5
+; CHECK: addps
+; CHECK-NEXT: ret
+
diff --git a/test/CodeGen/X86/ret-mmx.ll b/test/CodeGen/X86/ret-mmx.ll
index 091fd539849..7adf307ce0f 100644
--- a/test/CodeGen/X86/ret-mmx.ll
+++ b/test/CodeGen/X86/ret-mmx.ll
@@ -33,7 +33,8 @@ define <2 x i32> @t3() nounwind {
 
 define double @t4() nounwind {
   ret double bitcast (<2 x i32> <i32 1, i32 0> to double)
; CHECK-LABEL: t4:
-; CHECK: movl $1
-; CHECK: movd {{.*}}, %xmm0
+; CHECK-NOT: movl $1
+; CHECK-NOT: pshufd
+; CHECK: movsd {{.*}}, %xmm0
 }