diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index e4fb07dea37..696910911f9 100644
--- a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -58,6 +58,9 @@ private:
   bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
   bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
   bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
+  SDValue SimplifyI24(SDValue &Op);
+  bool SelectI24(SDValue Op, SDValue &I24);
+  bool SelectU24(SDValue Op, SDValue &U24);
 
   static bool checkType(const Value *ptr, unsigned int addrspace);
 
@@ -674,7 +677,9 @@ const char *AMDGPUDAGToDAGISel::getPassName() const {
 #endif
 #undef DEBUGTMP
 
-///==== AMDGPU Functions ====///
+//===----------------------------------------------------------------------===//
+// Complex Patterns
+//===----------------------------------------------------------------------===//
 
 bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
     SDValue& IntPtr) {
@@ -741,6 +746,53 @@ bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
   return true;
 }
 
+// Fold away operations that only affect the top 8 bits of Op, since the
+// 24-bit multiply instructions ignore them.
+SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
+  APInt Demanded = APInt(32, 0x00FFFFFF);
+  APInt KnownZero, KnownOne;
+  TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
+  const TargetLowering *TLI = getTargetLowering();
+  if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
+    CurDAG->ReplaceAllUsesWith(Op, TLO.New);
+    CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
+    return SimplifyI24(TLO.New);
+  } else {
+    return Op;
+  }
+}
+
+bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {
+
+  assert(Op.getValueType() == MVT::i32);
+
+  // For this to be a signed 24-bit value, bit 23 must be a sign bit, i.e.
+  // the top 9 bits must all be copies of the sign bit.
+  if (CurDAG->ComputeNumSignBits(Op) == 9) {
+    I24 = SimplifyI24(Op);
+    return true;
+  }
+  return false;
+}
+
+bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
+  APInt KnownZero;
+  APInt KnownOne;
+  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);
+
+  assert(Op.getValueType() == MVT::i32);
+
+  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
+  // i32. These smaller types are legal to use with the i24 instructions.
+  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
+      Op.getOpcode() == ISD::ANY_EXTEND ||
+      ISD::isEXTLoad(Op.getNode())) {
+    U24 = SimplifyI24(Op);
+    return true;
+  }
+  return false;
+}
+
 void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
 
   if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td
index 04618f27e17..d6a7759503c 100644
--- a/lib/Target/R600/AMDGPUInstructions.td
+++ b/lib/Target/R600/AMDGPUInstructions.td
@@ -173,6 +173,9 @@ def FP_ONE : PatLeaf <
   [{return N->isExactlyValue(1.0);}]
 >;
 
+def U24 : ComplexPattern<i32, 1, "SelectU24", [], []>;
+def I24 : ComplexPattern<i32, 1, "SelectI24", [], []>;
+
 let isCodeGenOnly = 1, isPseudo = 1 in {
 
 let usesCustomInserter = 1 in {
@@ -366,6 +369,16 @@ class ROTRPattern <Instruction BIT_ALIGN> : Pat <
   (BIT_ALIGN $src0, $src0, $src1)
 >;
 
+// 24-bit arithmetic patterns
+def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;
+
+/*
+class UMUL24Pattern <Instruction UMUL24> : Pat <
+  (mul U24:$x, U24:$y),
+  (UMUL24 $x, $y)
+>;
+*/
+
 include "R600Instructions.td"
 include "SIInstrInfo.td"
 
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index 9aeebc94361..56015ea80a4 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -1473,6 +1473,9 @@ let Predicates = [isEGorCayman] in {
   def CNDGE_eg : CNDGE_Common<0x1B>;
   def MUL_LIT_eg : MUL_LIT_Common<0x1F>;
   def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>;
+  def MUL_UINT24_eg : R600_2OP <0xB5, "MUL_UINT24",
+    [(set i32:$dst, (mul U24:$src0, U24:$src1))], VecALU
+  >;
   def DOT4_eg : DOT4_Common<0xBE>;
   defm CUBE_eg : CUBE_Common<0xC0>;
 
@@ -1703,6 +1706,10 @@ defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>;
 
 let Predicates = [isCayman] in {
 
+def MUL_INT24_cm : R600_2OP <0x5B, "MUL_INT24",
+  [(set i32:$dst, (mul I24:$src0, I24:$src1))], VecALU
+>;
+
 let isVector = 1 in {
 
 def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 61163c2982d..8f3baaab615 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -866,14 +866,16 @@ defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32",
   [(set f32:$dst, (fmul f32:$src0, f32:$src1))]
 >;
 
-} // End isCommutable = 1
 
-//defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24", []>;
+defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24",
+  [(set i32:$dst, (mul I24:$src0, I24:$src1))]
+>;
 //defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "V_MUL_HI_I32_I24", []>;
-//defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24", []>;
+defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24",
+  [(set i32:$dst, (mul U24:$src0, U24:$src1))]
+>;
 //defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "V_MUL_HI_U32_U24", []>;
 
-let isCommutable = 1 in {
 
 defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32",
   [(set f32:$dst, (AMDGPUfmin f32:$src0, f32:$src1))]
diff --git a/test/CodeGen/R600/mul_int24.ll b/test/CodeGen/R600/mul_int24.ll
new file mode 100644
index 00000000000..16ae7601109
--- /dev/null
+++ b/test/CodeGen/R600/mul_int24.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
+
+; EG-CHECK: @i32_mul24
+; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
+; EG-CHECK: MULLO_INT
+; CM-CHECK: MUL_INT24 {{[ *]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
+; SI-CHECK: V_MUL_I32_I24
+define void @i32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+entry:
+  %0 = shl i32 %a, 8
+  %a_24 = ashr i32 %0, 8
+  %1 = shl i32 %b, 8
+  %b_24 = ashr i32 %1, 8
+  %2 = mul i32 %a_24, %b_24
+  store i32 %2, i32 addrspace(1)* %out
+  ret void
+}
diff --git a/test/CodeGen/R600/mul_uint24.ll b/test/CodeGen/R600/mul_uint24.ll
new file mode 100644
index 00000000000..b1a7f94402e
--- /dev/null
+++ b/test/CodeGen/R600/mul_uint24.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
+
+; EG-CHECK: @u32_mul24
+; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
+; SI-CHECK: @u32_mul24
+; SI-CHECK: V_MUL_U32_U24
+
+define void @u32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+entry:
+  %0 = shl i32 %a, 8
+  %a_24 = lshr i32 %0, 8
+  %1 = shl i32 %b, 8
+  %b_24 = lshr i32 %1, 8
+  %2 = mul i32 %a_24, %b_24
+  store i32 %2, i32 addrspace(1)* %out
+  ret void
+}
+
+; EG-CHECK: @i16_mul24
+; EG-CHECK-DAG: VTX_READ_16 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
+; EG-CHECK-DAG: VTX_READ_16 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
+; The order of A and B does not matter.
+; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]], [[A]], [[B]]
+; The result must be sign-extended.
+; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MUL_CHAN]], literal.x
+; EG-CHECK: 16
+; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
+; EG-CHECK: 16
+; SI-CHECK: @i16_mul24
+; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
+; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 16, [[MUL]]
+; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 16, [[LSHL]]
+
+define void @i16_mul24(i32 addrspace(1)* %out, i16 %a, i16 %b) {
+entry:
+  %0 = mul i16 %a, %b
+  %1 = sext i16 %0 to i32
+  store i32 %1, i32 addrspace(1)* %out
+  ret void
+}
+
+; EG-CHECK: @i8_mul24
+; EG-CHECK-DAG: VTX_READ_8 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
+; EG-CHECK-DAG: VTX_READ_8 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
+; The order of A and B does not matter.
+; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]], [[A]], [[B]]
+; The result must be sign-extended.
+; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MUL_CHAN]], literal.x
+; EG-CHECK: 24
+; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
+; EG-CHECK: 24
+; SI-CHECK: @i8_mul24
+; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
+; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 24, [[MUL]]
+; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 24, [[LSHL]]
+
+define void @i8_mul24(i32 addrspace(1)* %out, i8 %a, i8 %b) {
+entry:
+  %0 = mul i8 %a, %b
+  %1 = sext i8 %0 to i32
+  store i32 %1, i32 addrspace(1)* %out
+  ret void
+}
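
The tests above satisfy SelectU24 either through the shl/lshr pair (u32_mul24) or through the extending-load path (i16_mul24, i8_mul24). The KnownZero test in SelectU24 can also be satisfied by an explicit AND mask. Below is a minimal IR sketch of that case, not part of the patch; the function name @u24_mask is hypothetical, and it would be run with the same llc invocations as mul_uint24.ll:

; AND with 0x00FFFFFF (16777215) makes bits 31-24 known zero, so
; ComputeMaskedBits satisfies the KnownZero check in SelectU24 and the
; multiply should select to MUL_UINT24 (EG/CM) or V_MUL_U32_U24 (SI).
define void @u24_mask(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  %a_24 = and i32 %a, 16777215
  %b_24 = and i32 %b, 16777215
  %mul = mul i32 %a_24, %b_24
  store i32 %mul, i32 addrspace(1)* %out
  ret void
}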