; RUN: llc < %s -mattr=-vsx -march=ppc32 -mattr=+altivec --enable-unsafe-fp-math | FileCheck %s

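; The CHECK lines below expect the vector constants in these functions to be
; materialized with AltiVec splat-immediate (vsplti*) and vxor instructions.
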
define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
        %tmp = load <4 x float>* %P3            ; <<4 x float>> [#uses=1]
        %tmp3 = load <4 x float>* %P1           ; <<4 x float>> [#uses=1]
        %tmp4 = fmul <4 x float> %tmp, %tmp3    ; <<4 x float>> [#uses=1]
        store <4 x float> %tmp4, <4 x float>* %P3
        store <4 x float> zeroinitializer, <4 x float>* %P1
        store <4 x i32> zeroinitializer, <4 x i32>* %P2
        ret void
}
; The fmul will spill a vspltisw to create a -0.0 vector used as the addend
; to vmaddfp (so that it is IEEE compliant with respect to zero sign propagation).
; CHECK: @VXOR
; CHECK: vsplti
; CHECK: vxor
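; Both stores below write an all-ones vector; the all-ones bit pattern is the
; same for every element width, so it is expected to be built with a single
; vector splat-immediate of -1, which the vsplti check below looks for.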
define void @VSPLTI(<4 x i32>* %P2, <8 x i16>* %P3) {
        store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), <4 x i32>* %P2
        store <8 x i16> < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >, <8 x i16>* %P3
        ret void
}
; CHECK: @VSPLTI
; CHECK: vsplti