This patch fixes the Altivec addend construction for the fused multiply-add
instruction (vmaddfp) to conform with IEEE, preserving the sign of a zero result when the product is -0.0. The -0.0 vector addend to vmaddfp is generated by creating a vector with all bits set and then shifting each element left by 31 bits, resulting in a vector of 0x80000000 (-0.0 as float). 'buildvec_canonicalize.ll' was adjusted to reflect this change, and 'vec_mul.ll' gained a float vector multiplication test.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@168998 91177308-0d34-0410-b5e6-96231b3b80d8
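For reference, a minimal scalar sketch in C of the bit trick described above, one 32-bit lane at a time (illustrative only, not part of the patch; the vspltisw/vslw semantics are paraphrased from the AltiVec ISA):

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {
        /* vspltisw vD, -1: splat all-ones into every 32-bit lane */
        uint32_t lane = UINT32_C(0xFFFFFFFF);
        /* vslw vD, vD, vD: each lane is shifted left by the low 5 bits
           (here 31) of the corresponding lane of the shift operand */
        lane <<= (lane & 31);
        assert(lane == UINT32_C(0x80000000));
        /* those bits, reinterpreted as a float, are negative zero */
        float f;
        memcpy(&f, &lane, sizeof f);
        assert(f == 0.0f && signbit(f));
        return 0;
    }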
lib/Target/PowerPC/PPCInstrAltivec.td
@@ -158,10 +158,6 @@ def vecspltisw : PatLeaf<(build_vector), [{
   return PPC::get_VSPLTI_elt(N, 4, *CurDAG).getNode() != 0;
 }], VSPLTISW_get_imm>;
 
-def V_immneg0 : PatLeaf<(build_vector), [{
-  return PPC::isAllNegativeZeroVector(N);
-}]>;
-
 //===----------------------------------------------------------------------===//
 // Helpers for defining instructions that directly correspond to intrinsics.
 
@@ -585,7 +581,12 @@ def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>;
 def V_SET0 : VXForm_setzero<1220, (outs VRRC:$vD), (ins),
                       "vxor $vD, $vD, $vD", VecFP,
                       [(set VRRC:$vD, (v4i32 immAllZerosV))]>;
+let IMM=-1 in {
+def V_SETALLONES : VXForm_3<908, (outs VRRC:$vD), (ins),
+                      "vspltisw $vD, -1", VecFP,
+                      [(set VRRC:$vD, (v4i32 immAllOnesV))]>;
+}
 } // VALU Operations.
 
 //===----------------------------------------------------------------------===//
 // Additional Altivec Patterns
@@ -672,7 +673,8 @@ def : Pat<(v4i32 (and VRRC:$A, (vnot_ppc VRRC:$B))),
           (VANDC VRRC:$A, VRRC:$B)>;
 
 def : Pat<(fmul VRRC:$vA, VRRC:$vB),
-          (VMADDFP VRRC:$vA, VRRC:$vB, (v4i32 (V_SET0)))>;
+          (VMADDFP VRRC:$vA, VRRC:$vB,
+                   (v4i32 (VSLW (V_SETALLONES), (V_SETALLONES))))>;
 
 // Fused multiply add and multiply sub for packed float. These are represented
 // separately from the real instructions above, for operations that must have
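A note on why the pattern above needs a -0.0 addend rather than the +0.0 that V_SET0 provided: under the default IEEE 754 rounding mode, (-0.0) + (+0.0) evaluates to +0.0, so a +0.0 addend destroys the sign of a -0.0 product, while x + (-0.0) equals x for every x, zeros included. A small C demonstration of this reasoning (standard C99, not part of the patch):

    #include <assert.h>
    #include <math.h>

    int main(void) {
        float a = -1.0f, b = 0.0f;                /* the product a*b is -0.0f */
        float with_pos_zero = fmaf(a, b, 0.0f);   /* models vmaddfp with a +0.0 addend */
        float with_neg_zero = fmaf(a, b, -0.0f);  /* models vmaddfp with a -0.0 addend */
        assert(with_pos_zero == 0.0f && !signbit(with_pos_zero)); /* sign lost */
        assert(with_neg_zero == 0.0f && signbit(with_neg_zero));  /* sign kept */
        return 0;
    }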
test/CodeGen/PowerPC/buildvec_canonicalize.ll
@@ -1,10 +1,4 @@
-; There should be exactly one vxor here.
-; RUN: llc < %s -march=ppc32 -mcpu=g5 --enable-unsafe-fp-math | \
-; RUN:   grep vxor | count 1
-
-; There should be exactly one vsplti here.
-; RUN: llc < %s -march=ppc32 -mcpu=g5 --enable-unsafe-fp-math | \
-; RUN:   grep vsplti | count 1
+; RUN: llc < %s -mattr=+altivec --enable-unsafe-fp-math | FileCheck %s
 
 define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
         %tmp = load <4 x float>* %P3            ; <<4 x float>> [#uses=1]
@@ -15,10 +9,16 @@ define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
         store <4 x i32> zeroinitializer, <4 x i32>* %P2
         ret void
 }
+; The fmul will spill a vspltisw to create a -0.0 vector used as the addend
+; to vmaddfp (so it is IEEE compliant with zero sign propagation).
+; CHECK: @VXOR
+; CHECK: vsplti
+; CHECK: vxor
 
 define void @VSPLTI(<4 x i32>* %P2, <8 x i16>* %P3) {
         store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), <4 x i32>* %P2
         store <8 x i16> < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >, <8 x i16>* %P3
         ret void
 }
+; CHECK: @VSPLTI
+; CHECK: vsplti
test/CodeGen/PowerPC/vec_mul.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep mullw
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vmsumuhm
+; RUN: llc < %s -mattr=+altivec | FileCheck %s
 
 define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
         %tmp = load <4 x i32>* %X               ; <<4 x i32>> [#uses=1]
@@ -7,6 +6,9 @@ define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
         %tmp3 = mul <4 x i32> %tmp, %tmp2       ; <<4 x i32>> [#uses=1]
         ret <4 x i32> %tmp3
 }
+; CHECK: test_v4i32:
+; CHECK: vmsumuhm
+; CHECK-NOT: mullw
 
 define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
         %tmp = load <8 x i16>* %X               ; <<8 x i16>> [#uses=1]
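The vmsumuhm check above reflects how a 32-bit element multiply can be assembled from 16-bit multiply-sum operations: the a_hi*b_hi partial product shifts entirely out of the low 32 bits, so only three partial products remain. A scalar C sketch of that identity (illustrative only; the backend's exact instruction selection may differ):

    #include <assert.h>
    #include <stdint.h>

    /* 32-bit multiply rebuilt from 16-bit halves, as one lane of the
       vector lowering would compute it */
    static uint32_t mul32_from_16(uint32_t a, uint32_t b) {
        uint32_t a_lo = a & 0xFFFFu, a_hi = a >> 16;
        uint32_t b_lo = b & 0xFFFFu, b_hi = b >> 16;
        /* a_hi*b_hi contributes only to bits >= 32 and wraps away */
        return a_lo * b_lo + ((a_lo * b_hi + a_hi * b_lo) << 16);
    }

    int main(void) {
        assert(mul32_from_16(7u, 9u) == 63u);
        assert(mul32_from_16(0xDEADBEEFu, 0x12345679u) ==
               (uint32_t)(0xDEADBEEFu * 0x12345679u));
        return 0;
    }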
@@ -14,6 +16,9 @@ define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
         %tmp3 = mul <8 x i16> %tmp, %tmp2       ; <<8 x i16>> [#uses=1]
         ret <8 x i16> %tmp3
 }
+; CHECK: test_v8i16:
+; CHECK: vmladduhm
+; CHECK-NOT: mullw
 
 define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
         %tmp = load <16 x i8>* %X               ; <<16 x i8>> [#uses=1]
@@ -21,3 +26,21 @@ define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
         %tmp3 = mul <16 x i8> %tmp, %tmp2       ; <<16 x i8>> [#uses=1]
         ret <16 x i8> %tmp3
 }
+; CHECK: test_v16i8:
+; CHECK: vmuloub
+; CHECK: vmuleub
+; CHECK-NOT: mullw
+
+define <4 x float> @test_float(<4 x float>* %X, <4 x float>* %Y) {
+        %tmp = load <4 x float>* %X
+        %tmp2 = load <4 x float>* %Y
+        %tmp3 = fmul <4 x float> %tmp, %tmp2
+        ret <4 x float> %tmp3
+}
+; Check the creation of a negative zero float vector by creating a vector of
+; all bits set and shifting it 31 bits to the left, resulting in a vector of
+; 4 x 0x80000000 (-0.0 as float).
+; CHECK: test_float:
+; CHECK: vspltisw [[ZNEG:[0-9]+]], -1
+; CHECK: vslw {{[0-9]+}}, [[ZNEG]], [[ZNEG]]
+; CHECK: vmaddfp