Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-21 00:32:23 +00:00
eb1d74e0c8
Remove && from the end of the lines to prevent tests from throwing run lines into the background. Also, clean up places where the same command is run multiple times by using a temporary file. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@36142 91177308-0d34-0410-b5e6-96231b3b80d8
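The RUN-line cleanup described in the commit message is the pattern this very test now uses: instead of chaining every check onto one long command with a trailing `&&`, the g5 compile is done once into the test harness's temporary file `%t` (`-o %t -f`) and each grep then reads `%t`. A hypothetical before/after sketch follows; the "old style" lines are illustrative only and are not copied from this commit's diff.

; Hypothetical old style (illustrative only): every check re-runs the whole
; pipeline, and a trailing && left at the end of a line can background the command.
; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -mcpu=g5 | grep vspltw | wc -l | grep 2 &&
; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -mcpu=g5 | grep vsplti | wc -l | grep 3

; New style (as in the file below): compile once into %t, then grep the file.
; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -mcpu=g5 -o %t -f
; RUN: grep vspltw %t | wc -l | grep 2
; RUN: grep vsplti %t | wc -l | grep 3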
74 lines
2.7 KiB
LLVM
; Test that vectors are scalarized/lowered correctly.
; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -mcpu=g3 | \
; RUN:   grep stfs | wc -l | grep 4
; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -mcpu=g5 -o %t -f
; RUN: grep vspltw %t | wc -l | grep 2
; RUN: grep vsplti %t | wc -l | grep 3
; RUN: grep vsplth %t | wc -l | grep 1

%f4 = type <4 x float>
%i4 = type <4 x int>

implementation

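; Splat of a variable float into <4 x float>: scalarized on g3 (the four stfs
; the RUN lines grep for); on g5 it should lower to a vspltw.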
void %splat(%f4* %P, %f4* %Q, float %X) {
    %tmp = insertelement %f4 undef, float %X, uint 0
    %tmp2 = insertelement %f4 %tmp, float %X, uint 1
    %tmp4 = insertelement %f4 %tmp2, float %X, uint 2
    %tmp6 = insertelement %f4 %tmp4, float %X, uint 3
    %q = load %f4* %Q
    %R = add %f4 %q, %tmp6
    store %f4 %R, %f4* %P
    ret void
}

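; Same variable splat, but with <4 x int>; on g5 this should also become a vspltw.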
void %splat_i4(%i4* %P, %i4* %Q, int %X) {
    %tmp = insertelement %i4 undef, int %X, uint 0
    %tmp2 = insertelement %i4 %tmp, int %X, uint 1
    %tmp4 = insertelement %i4 %tmp2, int %X, uint 2
    %tmp6 = insertelement %i4 %tmp4, int %X, uint 3
    %q = load %i4* %Q
    %R = add %i4 %q, %tmp6
    store %i4 %R, %i4* %P
    ret void
}

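; Splat of the immediate -1: should be materialized with a vspltisw,
; one of the three vsplti* instructions the RUN lines count.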
void %splat_imm_i32(%i4* %P, %i4* %Q, int %X) {
    %q = load %i4* %Q
    %R = add %i4 %q, <int -1, int -1, int -1, int -1>
    store %i4 %R, %i4* %P
    ret void
}

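; 65537 is 0x00010001, i.e. every halfword equal to 1, so this constant
; should be built with a vspltish instead of a constant-pool load.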
void %splat_imm_i16(%i4* %P, %i4* %Q, int %X) {
    %q = load %i4* %Q
    %R = add %i4 %q, <int 65537, int 65537, int 65537, int 65537>
    store %i4 %R, %i4* %P
    ret void
}

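; Splat of a variable halfword, then bitcast to <16 x ubyte>: this is the
; single vsplth the RUN lines expect.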
void %splat_h(short %tmp, <16 x ubyte>* %dst) {
    %tmp = insertelement <8 x short> undef, short %tmp, uint 0
    %tmp72 = insertelement <8 x short> %tmp, short %tmp, uint 1
    %tmp73 = insertelement <8 x short> %tmp72, short %tmp, uint 2
    %tmp74 = insertelement <8 x short> %tmp73, short %tmp, uint 3
    %tmp75 = insertelement <8 x short> %tmp74, short %tmp, uint 4
    %tmp76 = insertelement <8 x short> %tmp75, short %tmp, uint 5
    %tmp77 = insertelement <8 x short> %tmp76, short %tmp, uint 6
    %tmp78 = insertelement <8 x short> %tmp77, short %tmp, uint 7
    %tmp78 = cast <8 x short> %tmp78 to <16 x ubyte>
    store <16 x ubyte> %tmp78, <16 x ubyte>* %dst
    ret void
}

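; Constant of eight shorts of 15, bitcast to <16 x sbyte>: should be
; materialized with a vspltish 15 rather than loaded from memory.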
void %spltish(<16 x ubyte>* %A, <16 x ubyte>* %B) {
    ; Gets converted to 16 x ubyte
    %tmp = load <16 x ubyte>* %B
    %tmp.s = cast <16 x ubyte> %tmp to <16 x sbyte>
    %tmp4 = sub <16 x sbyte> %tmp.s, cast (<8 x short> < short 15, short 15, short 15, short 15, short 15, short 15, short 15, short 15 > to <16 x sbyte>)
    %tmp4.u = cast <16 x sbyte> %tmp4 to <16 x ubyte>
    store <16 x ubyte> %tmp4.u, <16 x ubyte>* %A
    ret void
}