[X86] Improve the lowering of BITCAST dag nodes from type f64 to type v2i32 (and vice versa).

Before this patch, when lowering an ISD::BITCAST dag node from type
MVT::f64 to type MVT::v2i32, the backend always emitted a stack store+load
sequence to bitconvert the f64 input operand to i64. The resulting i64
node was then used to build the v2i32 vector.
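
Schematically, the old expansion looked like this (an illustrative
pseudo-DAG sketch, not verbatim SelectionDAG dump output):

  t0: ch  = store %f64_src, FrameIndex   ; spill the f64 to a stack slot
  t1: i64 = load FrameIndex              ; reload the same bits as an i64
  ...                                    ; build the v2i32 result from t1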

With this patch, the backend now produces a cheaper SCALAR_TO_VECTOR from
MVT::f64 to MVT::v2f64. That SCALAR_TO_VECTOR is then followed by a "free"
bitcast to type MVT::v4i32. The first two elements of the resulting v4i32
are then extracted to build a v2i32 vector (which is illegal and is
therefore promoted to MVT::v2i64).
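
Schematically, the new expansion is (again an illustrative pseudo-DAG
sketch; the actual nodes are built in ReplaceNodeResults below):

  t0: v2f64 = scalar_to_vector %f64_src
  t1: v4i32 = bitcast t0                 ; free, just reinterprets the bits
  e0: i32   = extract_vector_elt t1, 0
  e1: i32   = extract_vector_elt t1, 1
  res: v2i32 = build_vector e0, e1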

This is in general cheaper than emitting a stack store+load sequence
to bitconvert the operand from type f64 to type i64.
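
For illustration, here is @test1 from the new test file added below; with
this patch it compiles to the pshufd+paddq+pshufd sequence noted in the
test, instead of going through the stack:

  define double @test1(double %A) {
    %1 = bitcast double %A to <2 x i32>
    %add = add <2 x i32> %1, <i32 3, i32 5>
    %2 = bitcast <2 x i32> %add to double
    ret double %2
  }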



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@208107 91177308-0d34-0410-b5e6-96231b3b80d8
Andrea Di Biagio 2014-05-06 17:09:03 +00:00
parent 22f779d1fd
commit 8a712ba229
3 changed files with 124 additions and 3 deletions


@@ -1038,6 +1038,8 @@ void X86TargetLowering::resetOperationActions() {
     setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
     setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal);
+
+    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
   }

   if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
@@ -14091,6 +14093,25 @@ static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
                             SelectionDAG &DAG) {
   MVT SrcVT = Op.getOperand(0).getSimpleValueType();
   MVT DstVT = Op.getSimpleValueType();
+
+  if (SrcVT == MVT::v2i32) {
+    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+    if (DstVT != MVT::f64)
+      // This conversion needs to be expanded.
+      return SDValue();
+
+    SDLoc dl(Op);
+    SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                               Op->getOperand(0), DAG.getIntPtrConstant(0));
+    SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                               Op->getOperand(0), DAG.getIntPtrConstant(1));
+    SDValue Elts[] = {Elt0, Elt1, Elt0, Elt0};
+    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Elts);
+    SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
+    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
+                       DAG.getIntPtrConstant(0));
+  }
+
   assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
          Subtarget->hasMMX() && "Unexpected custom BITCAST");
   assert((DstVT == MVT::i64 ||
@@ -14546,8 +14567,27 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
     ReplaceATOMIC_BINARY_64(N, Results, DAG, Opc);
     return;
   }
-  case ISD::ATOMIC_LOAD:
+  case ISD::ATOMIC_LOAD: {
     ReplaceATOMIC_LOAD(N, Results, DAG);
+    return;
+  }
+  case ISD::BITCAST: {
+    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
+    EVT DstVT = N->getValueType(0);
+    EVT SrcVT = N->getOperand(0)->getValueType(0);
+
+    if (SrcVT == MVT::f64 && DstVT == MVT::v2i32) {
+      SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
+                                     MVT::v2f64, N->getOperand(0));
+      SDValue ToV4I32 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Expanded);
+      SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                                 ToV4I32, DAG.getIntPtrConstant(0));
+      SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+                                 ToV4I32, DAG.getIntPtrConstant(1));
+      SDValue Elts[] = {Elt0, Elt1};
+      Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Elts));
+    }
+  }
   }
 }


@@ -0,0 +1,80 @@
; RUN: llc < %s -march=x86-64 -mcpu=core2 -mattr=+sse2 | FileCheck %s

define double @test1(double %A) {
  %1 = bitcast double %A to <2 x i32>
  %add = add <2 x i32> %1, <i32 3, i32 5>
  %2 = bitcast <2 x i32> %add to double
  ret double %2
}
; FIXME: Ideally we should be able to fold the entire body of @test1 into a
; single paddd instruction. At the moment we produce the sequence
; pshufd+paddq+pshufd.

; CHECK-LABEL: test1
; CHECK-NOT: movsd
; CHECK: pshufd
; CHECK-NEXT: paddq
; CHECK-NEXT: pshufd
; CHECK-NEXT: ret


define double @test2(double %A, double %B) {
  %1 = bitcast double %A to <2 x i32>
  %2 = bitcast double %B to <2 x i32>
  %add = add <2 x i32> %1, %2
  %3 = bitcast <2 x i32> %add to double
  ret double %3
}
; FIXME: Ideally we should be able to fold the entire body of @test2 into a
; single 'paddd %xmm1, %xmm0' instruction. At the moment we produce the
; sequence pshufd+pshufd+paddq+pshufd.

; CHECK-LABEL: test2
; CHECK-NOT: movsd
; CHECK: pshufd
; CHECK-NEXT: pshufd
; CHECK-NEXT: paddq
; CHECK-NEXT: pshufd
; CHECK-NEXT: ret


define i64 @test3(i64 %A) {
  %1 = bitcast i64 %A to <2 x float>
  %add = fadd <2 x float> %1, <float 3.0, float 5.0>
  %2 = bitcast <2 x float> %add to i64
  ret i64 %2
}
; CHECK-LABEL: test3
; CHECK-NOT: pshufd
; CHECK: addps
; CHECK-NOT: pshufd
; CHECK: ret


define i64 @test4(i64 %A) {
  %1 = bitcast i64 %A to <2 x i32>
  %add = add <2 x i32> %1, <i32 3, i32 5>
  %2 = bitcast <2 x i32> %add to i64
  ret i64 %2
}
; FIXME: At the moment we still produce the sequence pshufd+paddq+pshufd.
; Ideally, we should fold that sequence into a single paddd.

; CHECK-LABEL: test4
; CHECK: pshufd
; CHECK-NEXT: paddq
; CHECK-NEXT: pshufd
; CHECK: ret


define double @test5(double %A) {
  %1 = bitcast double %A to <2 x float>
  %add = fadd <2 x float> %1, <float 3.0, float 5.0>
  %2 = bitcast <2 x float> %add to double
  ret double %2
}
; CHECK-LABEL: test5
; CHECK: addps
; CHECK-NEXT: ret


@@ -33,7 +33,8 @@ define <2 x i32> @t3() nounwind {
 define double @t4() nounwind {
   ret double bitcast (<2 x i32> <i32 1, i32 0> to double)
 ; CHECK-LABEL: t4:
-; CHECK: movl $1
-; CHECK: movd {{.*}}, %xmm0
+; CHECK-NOT: movl $1
+; CHECK-NOT: pshufd
+; CHECK: movsd {{.*}}, %xmm0
 }