commit 4abb04a65c
a flawed direction and causing miscompiles. Read on for details.

Fundamentally, the premise of this patch series was to map VECTOR_SHUFFLE DAG
nodes into VSELECT DAG nodes for all blends because we are going to *have* to
lower to VSELECT nodes for some blends to trigger the instruction selection
patterns of variable blend instructions. This doesn't actually work out so
well.

In order to match performance with the existing VECTOR_SHUFFLE lowering code,
we would need to re-slice the blend in order to fit it into either the integer
or floating point blends available on the ISA. When coming from VECTOR_SHUFFLE
(or other vNi1 style VSELECT sources) this works well because the X86 backend
ensures that these types of operands to VSELECT get sign extended into '-1'
and '0' for true and false, allowing us to re-slice the bits in whatever
granularity without changing semantics.

However, if the VSELECT condition comes from some other source, for example
code lowering vector comparisons, it will likely only have the required bit
set -- the high bit. We can't blindly slice up this style of VSELECT. Reid
found some code using Halide that triggers this and I'm hopeful to eventually
get a test case, but I don't need it to understand why this is A Bad Idea.

There is another aspect that makes this approach flawed. When in
VECTOR_SHUFFLE form, we have very distilled information that represents the
*constant* blend mask. Converting back to a VSELECT form actually can lose
this information, and so I think now that it is better to treat this as
VECTOR_SHUFFLE until the very last moment and only use VSELECT nodes for
instruction selection purposes.

My plan is to:
1) Clean up and formalize the target pre-legalization DAG combine that
   converts a VSELECT with a constant condition operand into a
   VECTOR_SHUFFLE.
2) Remove any fancy lowering from VSELECT during *legalization*, relying
   entirely on the DAG combine to catch cases where we can match to an
   immediate-controlled blend instruction.

One additional step that I'm not planning on but would be interested in
others' opinions on: we could add an X86ISD::VSELECT or X86ISD::BLENDV which
encodes a fully legalized VSELECT node. Then it would be easy to write isel
patterns only in terms of this to ensure VECTOR_SHUFFLE legalization only ever
forms the fully legalized construct and we can't cycle between it and VSELECT
combining.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218658 91177308-0d34-0410-b5e6-96231b3b80d8
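A minimal IR sketch of the two condition sources the message distinguishes
(the function names are illustrative, not from the commit): the first select
has a constant condition, so it is really a blend with a compile-time mask and
can be re-sliced to whatever element width the ISA's immediate-controlled
blends want; the second takes its condition from a vector comparison, where
after lowering only the high bit of each lane is meaningful, so it cannot be
blindly re-sliced at a different granularity.

; Constant condition: the blend mask is known at compile time, so this is
; effectively a shuffle and a candidate for an immediate-controlled blend.
define <4 x i32> @blend_constant_mask(<4 x i32> %x, <4 x i32> %y) {
  %r = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> %x, <4 x i32> %y
  ret <4 x i32> %r
}

; Comparison-sourced condition: only the high bit of each lane carries the
; result after lowering, so re-slicing the mask would change semantics.
define <4 x i32> @blend_cmp_mask(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x, <4 x i32> %y) {
  %m = icmp slt <4 x i32> %a, %b
  %r = select <4 x i1> %m, <4 x i32> %x, <4 x i32> %y
  ret <4 x i32> %r
}

Roughly, under the plan above the first case would be canonicalized to a
VECTOR_SHUFFLE by the pre-legalization DAG combine and matched to an immediate
blend, while the second stays a VSELECT and selects a variable blend such as
vblendvps.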
137 lines · 5.5 KiB · LLVM
; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=corei7-avx | FileCheck %s -check-prefix=X32 --check-prefix=CHECK
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s -check-prefix=X64 --check-prefix=CHECK

define <4 x i32> @blendvb_fallback_v4i32(<4 x i1> %mask, <4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @blendvb_fallback_v4i32
; CHECK: vblendvps
; CHECK: ret
  %ret = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %y
  ret <4 x i32> %ret
}

define <8 x i32> @blendvb_fallback_v8i32(<8 x i1> %mask, <8 x i32> %x, <8 x i32> %y) {
; CHECK-LABEL: @blendvb_fallback_v8i32
; CHECK: vblendvps
; CHECK: ret
  %ret = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
  ret <8 x i32> %ret
}

define <8 x float> @blendvb_fallback_v8f32(<8 x i1> %mask, <8 x float> %x, <8 x float> %y) {
; CHECK-LABEL: @blendvb_fallback_v8f32
; CHECK: vblendvps
; CHECK: ret
  %ret = select <8 x i1> %mask, <8 x float> %x, <8 x float> %y
  ret <8 x float> %ret
}

declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) nounwind readnone

define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
; CHECK-LABEL: insertps_from_vector_load:
; On X32, account for the argument's move to registers
; X32: movl 4(%esp), %eax
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK-NEXT: ret
  %1 = load <4 x float>* %pb, align 16
  %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
  ret <4 x float> %2
}

;; Use a non-zero CountS for insertps
define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
; CHECK-LABEL: insertps_from_vector_load_offset:
; On X32, account for the argument's move to registers
; X32: movl 4(%esp), %eax
; CHECK-NOT: mov
;; Try to match a bit more of the instr, since we need the load's offset.
; CHECK: insertps $96, 4(%{{...}}), %
; CHECK-NEXT: ret
  %1 = load <4 x float>* %pb, align 16
  %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
  ret <4 x float> %2
}

define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x float>* nocapture readonly %pb, i64 %index) {
; CHECK-LABEL: insertps_from_vector_load_offset_2:
; On X32, account for the argument's move to registers
; X32: movl 4(%esp), %eax
; X32: movl 8(%esp), %ecx
; CHECK-NOT: mov
;; Try to match a bit more of the instr, since we need the load's offset.
; CHECK: vinsertps $-64, 12(%{{...}},%{{...}}), %
; CHECK-NEXT: ret
  %1 = getelementptr inbounds <4 x float>* %pb, i64 %index
  %2 = load <4 x float>* %1, align 16
  %3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
  ret <4 x float> %3
}

define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocapture readonly %fb, i64 %index) {
; CHECK-LABEL: insertps_from_broadcast_loadf32:
; On X32, account for the arguments' move to registers
; X32: movl 8(%esp), %eax
; X32: movl 4(%esp), %ecx
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK-NEXT: ret
  %1 = getelementptr inbounds float* %fb, i64 %index
  %2 = load float* %1, align 4
  %3 = insertelement <4 x float> undef, float %2, i32 0
  %4 = insertelement <4 x float> %3, float %2, i32 1
  %5 = insertelement <4 x float> %4, float %2, i32 2
  %6 = insertelement <4 x float> %5, float %2, i32 3
  %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
  ret <4 x float> %7
}

define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float>* nocapture readonly %b) {
; CHECK-LABEL: insertps_from_broadcast_loadv4f32:
; On X32, account for the arguments' move to registers
; X32: movl 4(%esp), %{{...}}
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK-NEXT: ret
  %1 = load <4 x float>* %b, align 4
  %2 = extractelement <4 x float> %1, i32 0
  %3 = insertelement <4 x float> undef, float %2, i32 0
  %4 = insertelement <4 x float> %3, float %2, i32 1
  %5 = insertelement <4 x float> %4, float %2, i32 2
  %6 = insertelement <4 x float> %5, float %2, i32 3
  %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
  ret <4 x float> %7
}

;; FIXME: We're emitting an extraneous pshufd/vbroadcast.
define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, float* nocapture readonly %fb, i64 %index) {
; CHECK-LABEL: insertps_from_broadcast_multiple_use:
; On X32, account for the arguments' move to registers
; X32: movl 8(%esp), %eax
; X32: movl 4(%esp), %ecx
; CHECK: vbroadcastss
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK: insertps $48
; CHECK: insertps $48
; CHECK: insertps $48
; CHECK: vaddps
; CHECK: vaddps
; CHECK: vaddps
; CHECK-NEXT: ret
  %1 = getelementptr inbounds float* %fb, i64 %index
  %2 = load float* %1, align 4
  %3 = insertelement <4 x float> undef, float %2, i32 0
  %4 = insertelement <4 x float> %3, float %2, i32 1
  %5 = insertelement <4 x float> %4, float %2, i32 2
  %6 = insertelement <4 x float> %5, float %2, i32 3
  %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
  %8 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %b, <4 x float> %6, i32 48)
  %9 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %c, <4 x float> %6, i32 48)
  %10 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %d, <4 x float> %6, i32 48)
  %11 = fadd <4 x float> %7, %8
  %12 = fadd <4 x float> %9, %10
  %13 = fadd <4 x float> %11, %12
  ret <4 x float> %13
}