[X86] Declare SSE4.1/AVX2 vector extloads covered by PMOV[SZ]X legal.

Now that we can fully specify extload legality, we can declare them
legal for the PMOVSX/PMOVZX instructions.  This enables, for instance,
a DAGCombine to fire on code such as
  (and (<zextload-equivalent> ...), <redundant mask>)
to turn it into:
  (zextload ...)
as seen in the testcase changes.
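
To illustrate, here is a reduced .ll sketch of the shape involved
(hypothetical function, not one of the committed testcases):

  ; Illustrative sketch only.  The (zext (load)) pair is combined into a
  ; zextload; previously that lowered to pmovzxbd plus a redundant pand,
  ; and with the extload now legal it becomes a single pmovzxbd from memory.
  define <4 x i32> @zext_load_v4i8(<4 x i8>* %p) {
    %v = load <4 x i8>* %p
    %z = zext <4 x i8> %v to <4 x i32>
    ret <4 x i32> %z
  }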

There is one regression, in widen_load-2.ll: we're no longer able
to do store-to-load forwarding with illegal extload memory types.
This will be addressed separately.
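
For reference, the regressed pattern looks roughly like this hypothetical
sketch (loosely adapted from @rot in widen_load-2.ll, which uses a packed
<3 x i8> struct; exact codegen depends on legalization details):

  ; Illustrative sketch only.  The stored bytes are reloaded by an
  ; extending load; store-to-load forwarding cannot see through the
  ; extload, so the value now round-trips through memory instead of
  ; being loaded with a single pmovzxbd.
  define <4 x i32> @store_then_extload(<4 x i8>* %p) {
    store <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8>* %p
    %v = load <4 x i8>* %p
    %z = zext <4 x i8> %v to <4 x i32>
    ret <4 x i32> %z
  }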

Differential Revision: http://reviews.llvm.org/D6533


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@226676 91177308-0d34-0410-b5e6-96231b3b80d8
Ahmed Bougacha 2015-01-21 17:07:06 +00:00
parent b727b13f61
commit 34288d885e
5 changed files with 74 additions and 15 deletions


@@ -1136,6 +1136,21 @@ void X86TargetLowering::resetOperationActions() {
       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
     }
 
+    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
+    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8,  Legal);
+    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8,  Legal);
+    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8,  Legal);
+    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
+    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
+    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
+
+    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8,  Legal);
+    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8,  Legal);
+    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8,  Legal);
+    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
+    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
+    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
+
     // i8 and i16 vectors are custom because the source register and source
     // memory operand types are not the same width.  f32 vectors are
     // custom since the immediate controlling the insert encodes additional
@@ -1315,6 +1330,21 @@ void X86TargetLowering::resetOperationActions() {
       // Custom CTPOP always performs better on natively supported v8i32
       setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
 
+      // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
+      setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
+      setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32,  MVT::v8i8,  Legal);
+      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64,  MVT::v4i8,  Legal);
+      setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32,  MVT::v8i16, Legal);
+      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64,  MVT::v4i16, Legal);
+      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64,  MVT::v4i32, Legal);
+
+      setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
+      setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32,  MVT::v8i8,  Legal);
+      setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64,  MVT::v4i8,  Legal);
+      setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32,  MVT::v8i16, Legal);
+      setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64,  MVT::v4i16, Legal);
+      setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64,  MVT::v4i32, Legal);
+
     } else {
       setOperationAction(ISD::ADD, MVT::v4i64, Custom);
       setOperationAction(ISD::ADD, MVT::v8i32, Custom);


@@ -6120,7 +6120,7 @@ defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem>;
 defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem>;
 
 // AVX2 Patterns
-multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, SDNode ExtOp> {
+multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, string ExtTy, SDNode ExtOp> {
   // Register-Register patterns
   def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
             (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
@@ -6154,6 +6154,22 @@ multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, SDNode ExtOp> {
   def : Pat<(v4i64 (ExtOp (v8i32 VR256:$src))),
             (!cast<I>(OpcPrefix#DQYrr) (EXTRACT_SUBREG VR256:$src, sub_xmm))>;
 
+  // Simple Register-Memory patterns
+  def : Pat<(v16i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+            (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
+  def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+            (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
+  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+            (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
+
+  def : Pat<(v8i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
+            (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
+  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
+            (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
+
+  def : Pat<(v4i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
+            (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
+
   // AVX2 Register-Memory patterns
   def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
             (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
@@ -6211,13 +6227,13 @@ multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, SDNode ExtOp> {
 }
 
 let Predicates = [HasAVX2] in {
-  defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", X86vsext>;
-  defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", X86vzext>;
+  defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", "s", X86vsext>;
+  defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", "z", X86vzext>;
 }
 
 // SSE4.1/AVX patterns.
-multiclass SS41I_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
-                                PatFrag ExtLoad16> {
+multiclass SS41I_pmovx_patterns<string OpcPrefix, string ExtTy,
+                                SDNode ExtOp, PatFrag ExtLoad16> {
   def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
             (!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
   def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
@@ -6233,6 +6249,21 @@ multiclass SS41I_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
   def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
             (!cast<I>(OpcPrefix#DQrr) VR128:$src)>;
 
+  // Simple Register-Memory patterns
+  def : Pat<(v8i16 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+            (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
+  def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+            (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
+  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi8") addr:$src)),
+            (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
+
+  def : Pat<(v4i32 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
+            (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
+  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi16") addr:$src)),
+            (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
+
+  def : Pat<(v2i64 (!cast<PatFrag>(ExtTy#"extloadvi32") addr:$src)),
+            (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
 
   def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
             (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
   def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
@@ -6295,13 +6326,13 @@ multiclass SS41I_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
 }
 
 let Predicates = [HasAVX] in {
-  defm : SS41I_pmovx_patterns<"VPMOVSX", X86vsext, extloadi32i16>;
-  defm : SS41I_pmovx_patterns<"VPMOVZX", X86vzext, loadi16_anyext>;
+  defm : SS41I_pmovx_patterns<"VPMOVSX", "s", X86vsext, extloadi32i16>;
+  defm : SS41I_pmovx_patterns<"VPMOVZX", "z", X86vzext, loadi16_anyext>;
 }
 
 let Predicates = [UseSSE41] in {
-  defm : SS41I_pmovx_patterns<"PMOVSX", X86vsext, extloadi32i16>;
-  defm : SS41I_pmovx_patterns<"PMOVZX", X86vzext, loadi16_anyext>;
+  defm : SS41I_pmovx_patterns<"PMOVSX", "s", X86vsext, extloadi32i16>;
+  defm : SS41I_pmovx_patterns<"PMOVZX", "z", X86vzext, loadi16_anyext>;
 }
 
 //===----------------------------------------------------------------------===//


@@ -15,8 +15,6 @@ define void @test_avx2_pmovx_256(<8 x i8>* %tmp64, <8 x float>* %tmp75) {
 ; CHECK-LABEL: test_avx2_pmovx_256
 ; We really don't care about the generated code.
 ; CHECK: vpmovzxbd
-; CHECK: vpbroadcastd
-; CHECK: vpand
 ; CHECK: vcvtdq2ps
 ; CHECK: vmovups
 ; CHECK: vzeroupper


@@ -81,8 +81,7 @@ define <4 x i32*> @INT2PTR1(<4 x i8>* %p) nounwind {
 entry:
   %G = load <4 x i8>* %p
 ;CHECK: movl
-;CHECK: pmovzxbd
-;CHECK: pand
+;CHECK: pmovzxbd (%
   %K = inttoptr <4 x i8> %G to <4 x i32*>
 ;CHECK: ret
   ret <4 x i32*> %K


@@ -191,8 +191,9 @@ define void @rot(%i8vec3pack* nocapture sret %result, %i8vec3pack* %X, %i8vec3pa
 ; CHECK-NEXT: movd %[[CONSTANT1]], %e[[R1:[abcd]]]x
 ; CHECK-NEXT: movw %[[R1]]x, (%[[PTR1:.*]])
 ; CHECK-NEXT: movb $1, 2(%[[PTR1]])
-; CHECK-NEXT: pmovzxbd (%[[PTR0]]), %[[X0:xmm[0-9]+]]
-; CHECK-NEXT: pand {{.*}}, %[[X0]]
+; CHECK-NEXT: movl (%[[PTR0]]), [[TMP1:%e[abcd]+x]]
+; CHECK-NEXT: movl [[TMP1]], [[TMP2:.*]]
+; CHECK-NEXT: pmovzxbd [[TMP2]], %[[X0:xmm[0-9]+]]
 ; CHECK-NEXT: pextrd $1, %[[X0]], %e[[R0:[abcd]]]x
 ; CHECK-NEXT: shrl %e[[R0]]x
 ; CHECK-NEXT: movd %[[X0]], %e[[R1:[abcd]]]x