mirror of https://github.com/c64scene-ar/llvm-6502.git
commit 3cf9267d4e
r224330 introduced a bug by misinterpreting the "FeatureVectorUAMem" bit. The commit log says that change did not affect anything, but that's not correct: that change allowed SSE instructions to have unaligned mem operands folded into math ops, and that's not allowed in the default specification for any SSE variant. The bug is exposed when compiling for an AVX-capable CPU that has this feature flag but with AVX codegen disabled. Another mistake in r224330 was not adding the feature flag to all AVX CPUs; the AMD chips were excluded.

This is part of the fix for PR22371 ( http://llvm.org/bugs/show_bug.cgi?id=22371 ).

This feature bit is SSE-specific, so I've renamed it to "FeatureSSEUnalignedMem". I've changed the existing test case for the feature bit to reflect the new name, renamed the test file itself to better reflect the feature, and added runs to fold-vex.ll to check for the failing codegen.

Note that the feature bit is not set by default on any CPU because it may require a configuration register setting to enable the enhanced unaligned behavior.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@227983 91177308-0d34-0410-b5e6-96231b3b80d8
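To make the difference concrete, here is a minimal C sketch of the pattern the test below exercises: an unaligned 16-byte vector load feeding a bitwise AND. The function and parameter names are illustrative, not from the commit, and the integer intrinsics stand in for the float-domain vandps/andps seen in the checks.

#include <emmintrin.h>

/* With AVX enabled, the compiler may fold the unaligned load straight
   into the VEX-encoded AND (e.g. vpand (%rdi), %xmm0, %xmm0). With
   plain SSE, the memory operand of pand must be 16-byte aligned, so a
   separate movdqu has to be emitted first. */
__m128i and_unaligned(const __m128i *p, __m128i x) {
  __m128i v = _mm_loadu_si128(p); /* unaligned load */
  return _mm_and_si128(v, x);     /* AND that may fold the load */
}

As of this revision, the renamed bit should correspond to the subtarget feature string sse-unaligned-mem, so a build targeting a suitably configured CPU can opt in with -mattr=+sse-unaligned-mem.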
fold-vex.ll (32 lines, 1.3 KiB, LLVM)
; Use CPU parameters to ensure that a CPU-specific attribute is not overriding the AVX definition.

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-avx | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx -mattr=-avx | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 -mattr=-avx | FileCheck %s --check-prefix=SSE
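; Note: the first three RUN lines enable AVX, either directly with -mattr=+avx
; or via an AVX-capable CPU model, and are checked against the default CHECK
; prefix; the last three force AVX off with -mattr=-avx and are checked
; against the SSE prefix instead.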
; No need to load unaligned operand from memory using an explicit instruction with AVX.
; The operand should be folded into the AND instr.

; With SSE, folding memory operands into math/logic ops requires 16-byte alignment
; unless specially configured on some CPUs such as AMD Family 10H.
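; Note: the load below is given align 2, well under the natural 16-byte
; alignment of <4 x i32>, so it is an unaligned memory operand for
; folding purposes.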
define <4 x i32> @test1(<4 x i32>* %p0, <4 x i32> %in1) nounwind {
  %in0 = load <4 x i32>* %p0, align 2
  %a = and <4 x i32> %in0, %in1
  ret <4 x i32> %a

; CHECK-LABEL: @test1
; CHECK-NOT: vmovups
; CHECK: vandps (%rdi), %xmm0, %xmm0
; CHECK-NEXT: ret

; SSE-LABEL: @test1
; SSE: movups (%rdi), %xmm1
; SSE-NEXT: andps %xmm1, %xmm0
; SSE-NEXT: ret
}