mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-11-02 07:11:49 +00:00
[PowerPC] Handle VSX v2i64 SIGN_EXTEND_INREG
sitofp from v2i32 to v2f64 ends up generating a SIGN_EXTEND_INREG v2i64 node (and similarly for v2i16 and v2i8). Even though there are no sign-extension (or algebraic shift) instructions for v2i64 types, we can handle v2i32 sign extensions by converting to and from v2i64. The small trick necessary here is to shift the i32 elements into the right lanes before the i32 -> f64 step. Because of the big-Endian nature of the system, we need the i32 portion in the high word of the i64 elements. For v2i16 and v2i8 we can do the same, but we first use the default Altivec shift-based expansion from v2i16 or v2i8 to v2i32 (by casting to v4i32) and then apply the above procedure. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@205146 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
368a977298
commit
ee8e48d4c9
@ -600,6 +600,13 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
|
||||
setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
|
||||
setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
|
||||
|
||||
// Vector operation legalization checks the result type of
|
||||
// SIGN_EXTEND_INREG, overall legalization checks the inner type.
|
||||
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
|
||||
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
|
||||
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
|
||||
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
|
||||
|
||||
addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
|
||||
}
|
||||
}
|
||||
@ -5947,6 +5954,30 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
|
||||
return Flags;
|
||||
}
|
||||
|
||||
/// Custom-lower SIGN_EXTEND_INREG for vector types on VSX targets.
///
/// There are no v2i64 sign-extension (or algebraic shift) instructions, but
/// the v2i32-in-v2i64 case can be pattern-matched using fp <-> int
/// conversions (see the corresponding patterns in PPCInstrVSX.td).  For
/// narrower inner types (v2i16, v2i8) we first widen the in-register extend
/// to v2i32 using the default Altivec shift-based expansion on v4i32, then
/// emit a v2i32-in-v2i64 SIGN_EXTEND_INREG that the patterns can match.
///
/// \param Op  the SIGN_EXTEND_INREG node; operand 0 is the value, operand 1
///            is a VTSDNode carrying the inner (pre-extension) type.
/// \returns the lowered node chain, or SDValue() to use default lowering.
SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
  // instructions), but for smaller types, we need to first extend up to v2i32
  // before going farther.
  if (Op.getValueType() == MVT::v2i64) {
    EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    if (ExtVT != MVT::v2i32) {
      // Widen the extend-from type to four lanes (e.g. v2i16 -> v4i16) and
      // perform the in-register extension as a v4i32 operation; Altivec has
      // shift instructions to expand that form.
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
                       DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
                                        ExtVT.getVectorElementType(), 4)));
      // Now the remaining extension is v2i32 -> v2i64, which the VSX
      // conversion-based patterns handle directly.
      Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
      Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
                       DAG.getValueType(MVT::v2i32));
    }

    return Op;
  }

  // All other types use the default expansion.
  return SDValue();
}
|
||||
|
||||
SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
|
||||
SelectionDAG &DAG) const {
|
||||
SDLoc dl(Op);
|
||||
@ -6074,6 +6105,7 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
|
||||
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
|
||||
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
|
||||
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
|
||||
case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
|
||||
case ISD::MUL: return LowerMUL(Op, DAG);
|
||||
|
||||
// For counter-based loop handling.
|
||||
|
@ -535,6 +535,7 @@ namespace llvm {
|
||||
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
|
||||
|
||||
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
|
||||
|
@ -791,6 +791,18 @@ def : Pat<(v2f64 (bitconvert v2i64:$A)),
|
||||
def : Pat<(v2i64 (bitconvert v2f64:$A)),
|
||||
(COPY_TO_REGCLASS $A, VRRC)>;
|
||||
|
||||
// sign extension patterns
|
||||
// To extend "in place" from v2i32 to v2i64, we have input data like:
|
||||
// | undef | i32 | undef | i32 |
|
||||
// but xvcvsxwdp expects the input in big-Endian format:
|
||||
// | i32 | undef | i32 | undef |
|
||||
// so we need to shift everything to the left by one i32 (word) before
|
||||
// the conversion.
|
||||
def : Pat<(sext_inreg v2i64:$C, v2i32),
|
||||
(XVCVDPSXDS (XVCVSXWDP (XXSLDWI $C, $C, 1)))>;
|
||||
def : Pat<(v2f64 (sint_to_fp (sext_inreg v2i64:$C, v2i32))),
|
||||
(XVCVSXWDP (XXSLDWI $C, $C, 1))>;
|
||||
|
||||
} // AddedComplexity
|
||||
} // HasVSX
|
||||
|
||||
|
@ -580,3 +580,41 @@ define <2 x i1> @test67(<2 x i64> %a, <2 x i64> %b) {
|
||||
; CHECK: blr
|
||||
}
|
||||
|
||||
; sitofp v2i32 -> v2f64 should lower to a word shift (xxsldwi, to place the
; i32s in the big-Endian high words of the i64 lanes) followed by a single
; xvcvsxwdp conversion -- no scalarization.
define <2 x double> @test68(<2 x i32> %a) {
  %w = sitofp <2 x i32> %a to <2 x double>
  ret <2 x double> %w

; CHECK-LABEL: @test68
; CHECK: xxsldwi [[V1:[0-9]+]], 34, 34, 1
; CHECK: xvcvsxwdp 34, [[V1]]
; CHECK: blr
}
||||
|
||||
; sitofp v2i16 -> v2f64: first the v2i16 is sign-extended in-register to
; v2i32 via the Altivec shift-based expansion (vslw by 16 then vsraw by 16;
; the 16 is built as vspltisw 8 + vadduwm since the splat immediate is
; limited to 5 bits), then the v2i32 case proceeds as in test68
; (xxsldwi + xvcvsxwdp).
define <2 x double> @test69(<2 x i16> %a) {
  %w = sitofp <2 x i16> %a to <2 x double>
  ret <2 x double> %w

; CHECK-LABEL: @test69
; CHECK: vspltisw [[V1:[0-9]+]], 8
; CHECK: vadduwm [[V2:[0-9]+]], [[V1]], [[V1]]
; CHECK: vslw [[V3:[0-9]+]], 2, [[V2]]
; CHECK: vsraw {{[0-9]+}}, [[V3]], [[V2]]
; CHECK: xxsldwi [[V4:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}, 1
; CHECK: xvcvsxwdp 34, [[V4]]
; CHECK: blr
}
|
||||
|
||||
; sitofp v2i8 -> v2f64: same shape as test69 but the in-register extend is
; from i8, so the shift amount is 24 (vspltisw 12 + vadduwm); after the
; vslw/vsraw pair produces a v2i32, the conversion is xxsldwi + xvcvsxwdp
; as in test68.
define <2 x double> @test70(<2 x i8> %a) {
  %w = sitofp <2 x i8> %a to <2 x double>
  ret <2 x double> %w

; CHECK-LABEL: @test70
; CHECK: vspltisw [[V1:[0-9]+]], 12
; CHECK: vadduwm [[V2:[0-9]+]], [[V1]], [[V1]]
; CHECK: vslw [[V3:[0-9]+]], 2, [[V2]]
; CHECK: vsraw {{[0-9]+}}, [[V3]], [[V2]]
; CHECK: xxsldwi [[V4:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}, 1
; CHECK: xvcvsxwdp 34, [[V4]]
; CHECK: blr
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user