Match new shuffle codegen for MOVHPD patterns
Add patterns to match SSE (shufpd) and AVX (vpermilpd) shuffle codegen when
storing the high element of a v2f64. The existing patterns were only checking
for an unpckh type of shuffle.

http://llvm.org/bugs/show_bug.cgi?id=21791

Differential Revision: http://reviews.llvm.org/D6586

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@223929 91177308-0d34-0410-b5e6-96231b3b80d8
commit 3cd5b83bb8
parent 7e1839ff01
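
For illustration, a minimal IR sketch of the case these patterns target; the
function name and operands are assumptions for the example, not taken from the
commit. Extracting and storing the high lane of a v2f64 is lowered through a
shufpd (SSE) or vpermilpd (AVX) shuffle that moves the high element into lane
0, and the new patterns let instruction selection fold that shuffle-plus-store
into a single movhpd (or vmovhpd) store:

    define void @store_high(<2 x double> %v, double* %p) {
      ; lane 1 is the high f64 element of the v2f64
      %hi = extractelement <2 x double> %v, i32 1
      ; previously selected as shufpd/vpermilpd $1 plus a low-lane store;
      ; with this change it becomes a single (v)movhpd store
      store double %hi, double* %p
      ret void
    }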
@@ -1332,6 +1332,8 @@ let Predicates = [HasAVX] in {
                      (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  // VMOVHPD patterns

  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
  // is during lowering, where it's not possible to recognize the load fold
  // cause it has two uses through a bitcast. One use disappears at isel time
@@ -1344,6 +1346,11 @@ let Predicates = [HasAVX] in {
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                    (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (f64 (vector_extract
                          (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
                          (iPTR 0))), addr:$dst),
            (VMOVHPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [UseSSE1] in {
@@ -1357,6 +1364,8 @@ let Predicates = [UseSSE1] in {
}

let Predicates = [UseSSE2] in {
  // MOVHPD patterns

  // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem
  // is during lowering, where it's not possible to recognize the load fold
  // cause it has two uses through a bitcast. One use disappears at isel time
@@ -1369,6 +1378,11 @@ let Predicates = [UseSSE2] in {
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                    (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (f64 (vector_extract
                          (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
                          (iPTR 0))), addr:$dst),
            (MOVHPDmr addr:$dst, VR128:$src)>;
}

//===----------------------------------------------------------------------===//
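
Both added patterns have the same shape: a store of lane 0 of a v2f64 that was
produced by an immediate-1 shuffle (X86Shufp under SSE2, X86VPermilpi under
AVX), which is how the new shuffle lowering expresses "store the high double".
A hedged sketch of a shufflevector that produces exactly this DAG (the
function name is assumed for the example):

    define void @store_high_via_shuffle(<2 x double> %v, double* %p) {
      ; a <1, 0> mask on a v2f64 lowers to X86Shufp/X86VPermilpi with imm 1
      %s = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 0>
      ; lane 0 of the shuffled vector is the original high lane, so the
      ; shuffle + f64 store pair is selected as one (v)movhpd store
      %lo = extractelement <2 x double> %s, i32 0
      store double %lo, double* %p
      ret void
    }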
@@ -1,5 +1,6 @@
; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=yonah | FileCheck %s
; RUN: llc < %s -march=x86-64 -mattr=+sse2 -mcpu=core2 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mattr=+avx -mcpu=btver2 | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

@@ -29,16 +30,15 @@ undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
; This case could easily end up inf-looping in the DAG combiner due to a
; low alignment load of the vector which prevents us from reliably forming a
; narrow load.
; FIXME: It would be nice to detect whether the target has fast and legal
; unaligned loads and use them here.

; The expected codegen is identical for the AVX case except
; load/store instructions will have a leading 'v', so we don't
; need to special-case the checks.

define void @t3() {
; CHECK-LABEL: t3:
;
; This moves the entire vector, shuffling the high double down. If we fixed the
; FIXME above it would just move the high double directly.
; CHECK: movupd
; CHECK: shufpd
; CHECK: movlpd
; CHECK: movhpd

bb:
  %tmp13 = load <2 x double>* undef, align 1
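
The diff view is truncated at this point, but the CHECK lines and comments
above give the shape of the test: an align-1 load blocks narrow-load
formation, so the whole vector is loaded with movupd and shuffled before its
lanes are stored. A loose sketch of that situation (not the actual body of
@t3):

    %v  = load <2 x double>* undef, align 1    ; movupd: unaligned, no narrow load
    %hi = extractelement <2 x double> %v, i32 1
    store double %hi, double* undef            ; a high-lane store that the new
                                               ; patterns can select as movhpd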