Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-11-01 15:11:24 +00:00)
[X86, AVX] adjust tablegen patterns to generate better code for scalar insertion into zero vector (PR23073)
For code like this:

define <8 x i32> @load_v8i32() {
  ret <8 x i32> <i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
}

we produce this AVX code:

_load_v8i32:                            ## @load_v8i32
        movl      $7, %eax
        vmovd     %eax, %xmm0
        vxorps    %ymm1, %ymm1, %ymm1
        vblendps  $1, %ymm0, %ymm1, %ymm0 ## ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
        retq

There are at least two bugs in play here:

1. We generate a blend when a scalar move does the same job using 2 fewer instruction bytes (see the FIXMEs).
2. We fail to match an existing pattern that would eliminate the xor and blend entirely; the zero bytes are free with vmovd.

The second fix involves an adjustment of "AddedComplexity" [1] and mostly masks the first problem.

[1] AddedComplexity has close to no documentation in the source. The best we have is this comment: "roughly corresponds to the number of nodes that are covered". It appears that x86 has bastardized this definition by inflating its values for some other undocumented reason; for example, we have a pattern with "AddedComplexity = 400" (!). I searched my way to this page:
https://groups.google.com/forum/#!topic/llvm-dev/5UX-Og9M0xQ

Differential Revision: http://reviews.llvm.org/D8794

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@233931 91177308-0d34-0410-b5e6-96231b3b80d8
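For reference, once the existing vzmovl/vmovd pattern matches (the point of the AddedComplexity adjustment), the same IR should compile to roughly the sketch below; a VEX-encoded vmovd already zeroes every upper lane, so no xor or blend is needed. This is a sketch of the intended result, not the literal checked-in output, and the exact register assignment may differ:

_load_v8i32:                            ## @load_v8i32
        movl    $7, %eax
        vmovd   %eax, %xmm0             ## writes 7 to lane 0; VEX vmovd zeroes xmm0[127:32] and ymm0[255:128]
        retq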
This commit is contained in: parent e1aa33fb9a, commit 8765e82c83
@@ -7168,6 +7168,10 @@ let Predicates = [HasAVX2] in {
}

// Patterns
// FIXME: Prefer a movss or movsd over a blendps when optimizing for size or
// on targets where they have equal performance. These were changed to use
// blends because blends have better throughput on SandyBridge and Haswell, but
// movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [UseAVX] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
@@ -7184,8 +7188,10 @@ let Predicates = [UseAVX] in {
  // Move low f32 and clear high bits.
  def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
            (VBLENDPSYrri (v8f32 (AVX_SET0)), VR256:$src, (i8 1))>;
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
            (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;

  // Move low f64 and clear high bits.
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
            (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;
  }

  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
@@ -7199,14 +7205,19 @@ let Predicates = [UseAVX] in {
             (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
             sub_xmm)>;

  // Move low f64 and clear high bits.
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
            (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;

  // These will incur an FP/int domain crossing penalty, but it may be the only
  // way without AVX2. Do not add any complexity because we may be able to match
  // more optimal patterns defined earlier in this file.
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
            (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;
  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
            (VBLENDPDYrri (v4i64 (AVX_SET0)), VR256:$src, (i8 1))>;
}

// FIXME: Prefer a movss or movsd over a blendps when optimizing for size or
// on targets where they have equal performance. These were changed to use
// blends because blends have better throughput on SandyBridge and Haswell, but
// movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [UseSSE41] in {
  // With SSE41 we can use blends for these patterns.
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
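As a rough illustration of the FIXME above: for clearing the upper lanes while keeping element 0, the blend and the scalar move below compute the same result, but the movss form has the shorter encoding (about 4 bytes versus 6 for the register-register VEX forms shown; the saving is 1-2 bytes depending on the registers involved). The byte counts are approximate and not taken from the patch itself:

        vxorps   %xmm1, %xmm1, %xmm1
        vblendps $1, %xmm0, %xmm1, %xmm0    ## xmm0 = xmm0[0],xmm1[1,2,3]  (3-byte VEX map 0F3A + imm8)
        ## equivalent result, shorter encoding:
        vxorps   %xmm1, %xmm1, %xmm1
        vmovss   %xmm0, %xmm1, %xmm0        ## xmm0 = xmm0[0],xmm1[1,2,3]  (2-byte VEX map 0F, no imm)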
@@ -843,8 +843,9 @@ define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
; ALL-LABEL: insert_reg_and_zero_v4f64:
; ALL:       # BB#0:
; ALL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; ALL-NEXT:    # kill: XMM0<def> XMM0<kill> YMM0<def>
; ALL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
; ALL-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; ALL-NEXT:    retq
  %v = insertelement <4 x double> undef, double %a, i32 0
  %shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
@@ -133,8 +133,6 @@ define <8 x float> @shuffle_v8f32_70000000(<8 x float> %a, <8 x float> %b) {
; AVX2:       # BB#0:
; AVX2-NEXT:    movl $7, %eax
; AVX2-NEXT:    vmovd %eax, %xmm1
; AVX2-NEXT:    vxorps %ymm2, %ymm2, %ymm2
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7]
; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -962,8 +960,6 @@ define <8 x i32> @shuffle_v8i32_70000000(<8 x i32> %a, <8 x i32> %b) {
; AVX2:       # BB#0:
; AVX2-NEXT:    movl $7, %eax
; AVX2-NEXT:    vmovd %eax, %xmm1
; AVX2-NEXT:    vxorps %ymm2, %ymm2, %ymm2
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7]
; AVX2-NEXT:    vpermd %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
  %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 7, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>