diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index ba444af4882..a18f550a16e 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -1332,6 +1332,8 @@ let Predicates = [HasAVX] in {
                       (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
             (VMOVHPSrm VR128:$src1, addr:$src2)>;
 
+  // VMOVHPD patterns
+
   // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
   // is during lowering, where it's not possible to recognize the load fold
   // cause it has two uses through a bitcast. One use disappears at isel time
@@ -1344,6 +1346,11 @@ let Predicates = [HasAVX] in {
   def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
             (VMOVHPDrm VR128:$src1, addr:$src2)>;
+
+  def : Pat<(store (f64 (vector_extract
+                          (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
+                          (iPTR 0))), addr:$dst),
+            (VMOVHPDmr addr:$dst, VR128:$src)>;
 }
 
 let Predicates = [UseSSE1] in {
@@ -1357,6 +1364,8 @@ let Predicates = [UseSSE1] in {
 }
 
 let Predicates = [UseSSE2] in {
+  // MOVHPD patterns
+
   // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
   // is during lowering, where it's not possible to recognize the load fold
   // cause it has two uses through a bitcast. One use disappears at isel time
@@ -1369,6 +1378,11 @@ let Predicates = [UseSSE2] in {
   def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                     (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
             (MOVHPDrm VR128:$src1, addr:$src2)>;
+
+  def : Pat<(store (f64 (vector_extract
+                          (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
+                          (iPTR 0))), addr:$dst),
+            (MOVHPDmr addr:$dst, VR128:$src)>;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/test/CodeGen/X86/extractelement-load.ll b/test/CodeGen/X86/extractelement-load.ll
index 86475997693..732f698f59f 100644
--- a/test/CodeGen/X86/extractelement-load.ll
+++ b/test/CodeGen/X86/extractelement-load.ll
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=yonah | FileCheck %s
 ; RUN: llc < %s -march=x86-64 -mattr=+sse2 -mcpu=core2 | FileCheck %s
+; RUN: llc < %s -march=x86-64 -mattr=+avx -mcpu=btver2 | FileCheck %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 
@@ -29,16 +30,15 @@ undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
 ; This case could easily end up inf-looping in the DAG combiner due to an
 ; low alignment load of the vector which prevents us from reliably forming a
 ; narrow load.
-; FIXME: It would be nice to detect whether the target has fast and legal
-; unaligned loads and use them here.
+
+; The expected codegen is identical for the AVX case except
+; load/store instructions will have a leading 'v', so we don't
+; need to special-case the checks.
+
 define void @t3() {
 ; CHECK-LABEL: t3:
-;
-; This movs the entire vector, shuffling the high double down. If we fixed the
-; FIXME above it would just move the high double directly.
 ; CHECK: movupd
-; CHECK: shufpd
-; CHECK: movlpd
+; CHECK: movhpd
 bb:
   %tmp13 = load <2 x double>* undef, align 1
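
Note: as a minimal IR sketch of what the new patterns target (a hypothetical
function, not part of the patch), consider storing the high lane of a v2f64.
Lane-1 extraction is lowered to a lane-0 shuffle feeding the store (X86Shufp
under SSE2, X86VPermilpi under AVX), which the added patterns now select as a
single movhpd/vmovhpd instead of a shuffle plus movlpd:

; Hypothetical example, assuming the same 2015-era typed-pointer IR syntax
; used in extractelement-load.ll above.
define void @store_high_f64(<2 x double> %v, double* %p) {
  %hi = extractelement <2 x double> %v, i32 1
  store double %hi, double* %p, align 8
  ret void
}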