[x86] Teach the new vector shuffle lowering to use VPERMILPD for
single-input shuffles with doubles. This allows them to fold memory
operands into the shuffle, etc. This is just the analog to the v4f32
case in my prior commit.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218193 91177308-0d34-0410-b5e6-96231b3b80d8
commit ae464b2ba1
parent 479d0ba62b
Author: Chandler Carruth
Date: 2014-09-20 22:09:27 +00:00

3 changed files with 35 additions and 7 deletions
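The 2-bit immediate built by this lowering is shared by SHUFPD and
VPERMILPD: bit 0 selects which source element feeds result lane 0, bit 1
which feeds lane 1. A minimal standalone C++ sketch of that encoding
(illustrative only; the helper name is made up and nothing below is part
of the commit):

    #include <cassert>

    // Re-statement of the patch's computation:
    //   unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
    // For a single-input v2f64 shuffle each mask entry is 0 or 1.
    static unsigned encodeSHUFPDMask(int Mask0, int Mask1) {
      return (Mask0 == 1) | ((Mask1 == 1) << 1);
    }

    int main() {
      assert(encodeSHUFPDMask(1, 0) == 1); // the <1,0> swap in shuffle_v2f64_10
      assert(encodeSHUFPDMask(0, 0) == 0); // the <0,0> broadcast case
      return 0;
    }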

lib/Target/X86/X86ISelLowering.cpp

@@ -7657,6 +7657,14 @@ static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
     // Straight shuffle of a single input vector. Simulate this by using the
     // single input as both of the "inputs" to this instruction.
     unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
+
+    if (Subtarget->hasAVX()) {
+      // If we have AVX, we can use VPERMILPD, which will allow folding a load
+      // into the shuffle.
+      return DAG.getNode(X86ISD::VPERMILP, DL, MVT::v2f64, V1,
+                         DAG.getConstant(SHUFPDMask, MVT::i8));
+    }
+
     return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
                        DAG.getConstant(SHUFPDMask, MVT::i8));
   }
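Why the single-input form folds better: shufpd reads two sources and ties
its destination to a register input, so swapping a value loaded from
memory still needs a separate move first, while vpermilpd has a single
data source that instruction selection is free to take straight from
memory. A rough intrinsics-level sketch of the same contrast (assumes
AVX, e.g. compile with -mavx; illustrative, not code from this commit):

    #include <immintrin.h>

    // With AVX the load below can fold into the permute, along the lines
    // of "vpermilpd $1, (%rdi), %xmm0", matching the new
    // shuffle_mem_v2f64_10 test further down.
    __m128d swap_lanes_avx(const double *p) {
      __m128d v = _mm_loadu_pd(p);
      return _mm_permute_pd(v, 1); // single input, elements reversed
    }

    // The SSE2 version wires the same value to both inputs of shufpd,
    // mirroring the "V1, V1" operands in the lowering above.
    __m128d swap_lanes_sse2(__m128d v) {
      return _mm_shuffle_pd(v, v, 1);
    }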

test/CodeGen/X86/vector-shuffle-128-v2.ll

@@ -70,9 +70,13 @@ define <2 x double> @shuffle_v2f64_00(<2 x double> %a, <2 x double> %b) {
   ret <2 x double> %shuffle
 }
 define <2 x double> @shuffle_v2f64_10(<2 x double> %a, <2 x double> %b) {
-; ALL-LABEL: @shuffle_v2f64_10
-; ALL: shufpd {{.*}} # xmm0 = xmm0[1,0]
-; ALL-NEXT: retq
+; SSE-LABEL: @shuffle_v2f64_10
+; SSE: shufpd {{.*}} # xmm0 = xmm0[1,0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: @shuffle_v2f64_10
+; AVX: vpermilpd {{.*}} # xmm0 = xmm0[1,0]
+; AVX-NEXT: retq
   %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 0>
   ret <2 x double> %shuffle
 }
@@ -112,7 +116,7 @@ define <2 x double> @shuffle_v2f64_32(<2 x double> %a, <2 x double> %b) {
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: @shuffle_v2f64_32
-; AVX: vshufpd {{.*}} # xmm0 = xmm1[1,0]
+; AVX: vpermilpd {{.*}} # xmm0 = xmm1[1,0]
 ; AVX-NEXT: retq
   %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 2>
   ret <2 x double> %shuffle
@@ -520,3 +524,19 @@ define <2 x double> @insert_dup_mem_v2f64(double* %ptr) {
   %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 0, i32 0>
   ret <2 x double> %shuffle
 }
+
+define <2 x double> @shuffle_mem_v2f64_10(<2 x double>* %ptr) {
+; SSE-LABEL: @shuffle_mem_v2f64_10
+; SSE: # BB#0:
+; SSE-NEXT: movapd (%rdi), %xmm0
+; SSE-NEXT: shufpd {{.*}} # xmm0 = xmm0[1,0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: @shuffle_mem_v2f64_10
+; AVX: # BB#0:
+; AVX-NEXT: vpermilpd {{.*}} # xmm0 = mem[1,0]
+; AVX-NEXT: retq
+  %a = load <2 x double>* %ptr
+  %shuffle = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+  ret <2 x double> %shuffle
+}

test/CodeGen/X86/vector-shuffle-256-v4.ll

@@ -129,7 +129,7 @@ define <4 x double> @shuffle_v4f64_0300(<4 x double> %a, <4 x double> %b) {
 define <4 x double> @shuffle_v4f64_1000(<4 x double> %a, <4 x double> %b) {
 ; AVX1-LABEL: @shuffle_v4f64_1000
 ; AVX1: # BB#0:
-; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm0[1,0]
+; AVX1-NEXT: vpermilpd {{.*}} # xmm1 = xmm0[1,0]
 ; AVX1-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
@@ -162,8 +162,8 @@ define <4 x double> @shuffle_v4f64_3210(<4 x double> %a, <4 x double> %b) {
 ; AVX1-LABEL: @shuffle_v4f64_3210
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm1[1,0]
-; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm0[1,0]
+; AVX1-NEXT: vpermilpd {{.*}} # xmm1 = xmm1[1,0]
+; AVX1-NEXT: vpermilpd {{.*}} # xmm0 = xmm0[1,0]
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
   %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
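One limit visible in the AVX1 checks above: vpermilpd only permutes within
each 128-bit lane, so a full reversal like shuffle_v4f64_3210 still splits
into two 128-bit permutes bracketed by vextractf128/vinsertf128. An
intrinsics-level sketch of that cross-lane structure (illustrative, not
from this commit; AVX2's vpermpd would later do this in one instruction):

    #include <immintrin.h>

    // Reverse all four doubles of a 256-bit vector on AVX1: swap within
    // each 128-bit lane, then swap the two lanes.
    __m256d reverse_v4f64(__m256d v) {
      __m256d swapped = _mm256_permute_pd(v, 5); // <1,0> within each lane
      return _mm256_permute2f128_pd(swapped, swapped, 1); // swap halves
    }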