Mirror of https://github.com/c64scene-ar/llvm-6502.git
Improve test to actually check for a folded load.
This test was checking for the lack of a "movaps" (an aligned load) rather than a "movups" (an unaligned load). It also included a store, which complicated the checking. Add specific CPU runs to prevent subtarget feature flag overrides from inhibiting this optimization.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@227972 91177308-0d34-0410-b5e6-96231b3b80d8
parent 7df357f552
commit ec60318bf5
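For context, a hand-written sketch (illustrative only, not output from this commit) of the three x86-64 AVX code shapes the commit message distinguishes; the register choices are assumptions:

    # 1) Aligned load feeding the AND -- the only shape the old test ruled out:
    vmovaps (%rdi), %xmm1
    vandps  %xmm1, %xmm0, %xmm0

    # 2) Unaligned load feeding the AND -- "CHECK-NOT: vmovaps" never caught
    #    this, so an unfolded unaligned load could slip through:
    vmovups (%rdi), %xmm1
    vandps  %xmm1, %xmm0, %xmm0

    # 3) Folded load -- the desired output; AVX allows unaligned memory
    #    operands, so the load folds straight into the AND:
    vandps  (%rdi), %xmm0, %xmm0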
@@ -1,16 +1,20 @@
+; Use CPU parameters to ensure that a CPU-specific attribute is not overriding the AVX definition.
+
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 | FileCheck %s
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
 
-;CHECK: @test
-; No need to load from memory. The operand will be loaded as part of the AND instr.
-;CHECK-NOT: vmovaps
-;CHECK: vandps
-;CHECK: ret
+; No need to load unaligned operand from memory using an explicit instruction with AVX.
+; The operand should be folded into the AND instr.
 
-define void @test1(<8 x i32>* %p0, <8 x i32> %in1) nounwind {
-entry:
-  %in0 = load <8 x i32>* %p0, align 2
-  %a = and <8 x i32> %in0, %in1
-  store <8 x i32> %a, <8 x i32>* undef
-  ret void
+define <4 x i32> @test1(<4 x i32>* %p0, <4 x i32> %in1) nounwind {
+  %in0 = load <4 x i32>* %p0, align 2
+  %a = and <4 x i32> %in0, %in1
+  ret <4 x i32> %a
+
+; CHECK-LABEL: @test1
+; CHECK-NOT: vmovups
+; CHECK: vandps (%rdi), %xmm0, %xmm0
+; CHECK-NEXT: ret
 }
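The two -mcpu runs (corei7-avx and btver2, both AVX-capable CPUs) guard against a CPU-specific subtarget feature flag disabling the fold that the plain -mattr=+avx run alone would miss. To reproduce one RUN line by hand, substitute the test file's path for %s; the path is not shown in this capture, so "fold-test.ll" below is a placeholder:

    llc < fold-test.ll -mtriple=x86_64-unknown-unknown -mcpu=btver2 | FileCheck fold-test.ll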