From ae16ff1c4266a17e9a46940453a64c0889eb49f7 Mon Sep 17 00:00:00 2001
From: Andrea Di Biagio
Date: Mon, 8 Dec 2014 14:36:51 +0000
Subject: [PATCH] [X86] Improved lowering of packed v8i16 vector shifts by
 non-constant count.

Before this patch, the backend sub-optimally expanded the non-constant shift
count of a v8i16 shift into a sequence of two 'movd' plus a 'movzwl'.

With this patch, the backend checks whether the target features SSE4.1. If
so, it lets the shuffle legalizer deal with the expansion of the shift
amount.

Example:
;;
define <8 x i16> @test(<8 x i16> %A, <8 x i16> %B) {
  %shamt = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
  %shl = shl <8 x i16> %A, %shamt
  ret <8 x i16> %shl
}
;;

Before (with -mattr=+avx):
  vmovd    %xmm1, %eax
  movzwl   %ax, %eax
  vmovd    %eax, %xmm1
  vpsllw   %xmm1, %xmm0, %xmm0
  retq

Now:
  vpxor    %xmm2, %xmm2, %xmm2
  vpblendw $1, %xmm1, %xmm2, %xmm1
  vpsllw   %xmm1, %xmm0, %xmm0
  retq

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@223660 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp    | 30 ++++++++++++++++++---
 test/CodeGen/X86/lower-vec-shift-2.ll | 15 ++++++--------
 2 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 4c65a6ccd7e..6b4251df61a 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -16720,18 +16720,28 @@ static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
     case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
   }

-  // Need to build a vector containing shift amount.
-  // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
-  SmallVector<SDValue, 4> ShOps;
-  ShOps.push_back(ShAmt);
-  if (SVT == MVT::i32) {
-    ShOps.push_back(DAG.getConstant(0, SVT));
+  const X86Subtarget &Subtarget =
+      DAG.getTarget().getSubtarget<X86Subtarget>();
+  if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
+      ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
+    // Let the shuffle legalizer expand this shift amount node.
+    SDValue Op0 = ShAmt.getOperand(0);
+    Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
+    ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
+  } else {
+    // Need to build a vector containing shift amount.
+    // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
+    SmallVector<SDValue, 4> ShOps;
+    ShOps.push_back(ShAmt);
+    if (SVT == MVT::i32) {
+      ShOps.push_back(DAG.getConstant(0, SVT));
+      ShOps.push_back(DAG.getUNDEF(SVT));
+    }
     ShOps.push_back(DAG.getUNDEF(SVT));
-  }
-  ShOps.push_back(DAG.getUNDEF(SVT));

-  MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
-  ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
+    MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
+    ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
+  }

   // The return type has to be a 128-bit type with the same element
   // type as the input type.
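A minimal standalone sketch, not part of the patch (the helper name
shl_v8i16_by_lane0 is invented for illustration), of what the new SSE4.1 path
emits, written with x86 SIMD intrinsics. PSLLW shifts every lane by the value
held in the low 64 bits of its count operand, so the upper lanes of the count
vector must be zero; blending against a zeroed register with immediate 1
keeps only element 0 and avoids the old round-trip through a general-purpose
register. Compile with, e.g., 'c++ -msse4.1'.

  #include <smmintrin.h> // SSE4.1 intrinsics
  #include <stdio.h>

  // Shift all eight i16 lanes of 'v' left by the value in lane 0 of 'amt'.
  static __m128i shl_v8i16_by_lane0(__m128i v, __m128i amt) {
    __m128i zero = _mm_setzero_si128();            // vpxor
    // Keep lane 0 of 'amt', zero lanes 1..7: vpblendw $1.
    __m128i count = _mm_blend_epi16(zero, amt, 1);
    return _mm_sll_epi16(v, count);                // vpsllw
  }

  int main(void) {
    short out[8];
    __m128i r = shl_v8i16_by_lane0(_mm_set1_epi16(3), _mm_set1_epi16(2));
    _mm_storeu_si128((__m128i *)out, r);
    printf("%d\n", out[0]); // 3 << 2 == 12
    return 0;
  }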
diff --git a/test/CodeGen/X86/lower-vec-shift-2.ll b/test/CodeGen/X86/lower-vec-shift-2.ll
index 90505b6dd8f..770775d3242 100644
--- a/test/CodeGen/X86/lower-vec-shift-2.ll
+++ b/test/CodeGen/X86/lower-vec-shift-2.ll
@@ -11,9 +11,8 @@ define <8 x i16> @test1(<8 x i16> %A, <8 x i16> %B) {
 ; SSE2-NEXT: retq
 ; AVX-LABEL: test1:
 ; AVX: # BB#0
-; AVX-NEXT: vmovd %xmm1, %eax
-; AVX-NEXT: movzwl %ax, %eax
-; AVX-NEXT: vmovd %eax, %xmm1
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
 ; AVX-NEXT: vpsllw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 entry:
@@ -66,9 +65,8 @@ define <8 x i16> @test4(<8 x i16> %A, <8 x i16> %B) {
 ; SSE2-NEXT: retq
 ; AVX-LABEL: test4:
 ; AVX: # BB#0
-; AVX-NEXT: vmovd %xmm1, %eax
-; AVX-NEXT: movzwl %ax, %eax
-; AVX-NEXT: vmovd %eax, %xmm1
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
 ; AVX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 entry:
@@ -121,9 +119,8 @@ define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) {
 ; SSE2-NEXT: retq
 ; AVX-LABEL: test7:
 ; AVX: # BB#0
-; AVX-NEXT: vmovd %xmm1, %eax
-; AVX-NEXT: movzwl %ax, %eax
-; AVX-NEXT: vmovd %eax, %xmm1
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
 ; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 entry:
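For contrast, a similar sketch, again not part of the patch (helper name
invented), of the pre-SSE4.1 expansion that the AVX check lines above
replace: the 16-bit count is pulled out through a general-purpose register
(zero-extended on the way) and moved back into the low lanes of a vector
before the shift. Compile with, e.g., 'c++ -msse2'.

  #include <emmintrin.h> // SSE2 intrinsics
  #include <stdio.h>

  // Old-style expansion: a GPR round-trip akin to the movd/movzwl/movd
  // sequence the patch removes on SSE4.1 targets.
  static __m128i shl_v8i16_by_lane0_sse2(__m128i v, __m128i amt) {
    int count = _mm_extract_epi16(amt, 0);  // pextrw zero-extends to 32 bits
    __m128i cnt = _mm_cvtsi32_si128(count); // movd: <count, 0, 0, 0>
    return _mm_sll_epi16(v, cnt);           // psllw reads cnt's low 64 bits
  }

  int main(void) {
    short out[8];
    __m128i r = shl_v8i16_by_lane0_sse2(_mm_set1_epi16(3), _mm_set1_epi16(2));
    _mm_storeu_si128((__m128i *)out, r);
    printf("%d\n", out[0]); // 3 << 2 == 12
    return 0;
  }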