diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
index 6b8a36b7cc7..73320a5235f 100644
--- a/lib/Target/R600/SIInstrInfo.cpp
+++ b/lib/Target/R600/SIInstrInfo.cpp
@@ -908,6 +908,7 @@ unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
   case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
   case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
   case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
+  case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
   case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
   case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
   case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
@@ -981,10 +982,14 @@ void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
   unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
   const TargetRegisterClass *RC = RI.getRegClass(RCID);
   unsigned Opcode = AMDGPU::V_MOV_B32_e32;
+
   if (MO.isReg()) {
     Opcode = AMDGPU::COPY;
   } else if (RI.isSGPRClass(RC)) {
     Opcode = AMDGPU::S_MOV_B32;
+  } else if (MO.isImm()) {
+    if (RC == &AMDGPU::VSrc_32RegClass)
+      Opcode = AMDGPU::S_MOV_B32;
   }
 
   const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 71792d12074..612f9534520 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -274,11 +274,15 @@ def S_ASHR_I64 : SOP2_SHIFT_64 <0x00000023, "S_ASHR_I64",
   [(set i64:$dst, (sra i64:$src0, i32:$src1))]
 >;
 
-} // End AddedComplexity = 1
 
 def S_BFM_B32 : SOP2_32 <0x00000024, "S_BFM_B32", []>;
 def S_BFM_B64 : SOP2_64 <0x00000025, "S_BFM_B64", []>;
-def S_MUL_I32 : SOP2_32 <0x00000026, "S_MUL_I32", []>;
+def S_MUL_I32 : SOP2_32 <0x00000026, "S_MUL_I32",
+  [(set i32:$dst, (mul i32:$src0, i32:$src1))]
+>;
+
+} // End AddedComplexity = 1
+
 def S_BFE_U32 : SOP2_32 <0x00000027, "S_BFE_U32", []>;
 def S_BFE_I32 : SOP2_32 <0x00000028, "S_BFE_I32", []>;
 def S_BFE_U64 : SOP2_64 <0x00000029, "S_BFE_U64", []>;
@@ -2499,11 +2503,6 @@ def : Pat <
 def : IMad24Pat<V_MAD_I32_I24>;
 def : UMad24Pat<V_MAD_U32_U24>;
 
-def : Pat <
-  (mul i32:$src0, i32:$src1),
-  (V_MUL_LO_I32 $src0, $src1)
->;
-
 def : Pat <
   (mulhu i32:$src0, i32:$src1),
   (V_MUL_HI_U32 $src0, $src1)
@@ -2514,6 +2513,11 @@ def : Pat <
   (V_MUL_HI_I32 $src0, $src1)
 >;
 
+def : Pat <
+  (mul i32:$src0, i32:$src1),
+  (V_MUL_LO_I32 $src0, $src1)
+>;
+
 def : Vop3ModPat;
diff --git a/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
index f8b4a61a7db..f5d731d0102 100644
--- a/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
+++ b/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
@@ -7,8 +7,8 @@ target triple = "r600--"
 ; FUNC-LABEL: @test
 ; OPT: mul nsw i32
 ; OPT-NEXT: sext
-; SI-LLC: V_MUL_LO_I32
-; SI-LLC-NOT: V_MUL_HI
+; SI-LLC: S_MUL_I32
+; SI-LLC-NOT: MUL
 define void @test(i8 addrspace(1)* nocapture readonly %in, i32 %a, i8 %b) {
 entry:
   %0 = mul nsw i32 %a, 3
diff --git a/test/CodeGen/R600/mul.ll b/test/CodeGen/R600/mul.ll
index d231e92e27f..fe9c1b90edd 100644
--- a/test/CodeGen/R600/mul.ll
+++ b/test/CodeGen/R600/mul.ll
@@ -3,14 +3,14 @@
 
 ; mul24 and mad24 are affected
 
-; FUNC-LABEL: @test2
+; FUNC-LABEL: @test_mul_v2i32
 ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 
 ; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 ; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 
-define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+define void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
   %a = load <2 x i32> addrspace(1) * %in
   %b = load <2 x i32> addrspace(1) * %b_ptr
@@ -19,7 +19,7 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   ret void
 }
 
-; FUNC-LABEL: @test4
+; FUNC-LABEL: @v_mul_v4i32
 ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -30,7 +30,7 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
 ; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 ; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 
-define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+define void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
   %a = load <4 x i32> addrspace(1) * %in
   %b = load <4 x i32> addrspace(1) * %b_ptr
@@ -39,12 +39,26 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   ret void
 }
 
-; FUNC-LABEL: @trunc_i64_mul_to_i32
+; FUNC-LABEL: @s_trunc_i64_mul_to_i32
+; SI: S_LOAD_DWORD
+; SI: S_LOAD_DWORD
+; SI: S_MUL_I32
+; SI: BUFFER_STORE_DWORD
+define void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+  %mul = mul i64 %b, %a
+  %trunc = trunc i64 %mul to i32
+  store i32 %trunc, i32 addrspace(1)* %out, align 8
+  ret void
+}
+
+; FUNC-LABEL: @v_trunc_i64_mul_to_i32
 ; SI: S_LOAD_DWORD
 ; SI: S_LOAD_DWORD
 ; SI: V_MUL_LO_I32
 ; SI: BUFFER_STORE_DWORD
-define void @trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+define void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
+  %a = load i64 addrspace(1)* %aptr, align 8
+  %b = load i64 addrspace(1)* %bptr, align 8
   %mul = mul i64 %b, %a
   %trunc = trunc i64 %mul to i32
   store i32 %trunc, i32 addrspace(1)* %out, align 8
   ret void
 }
@@ -56,7 +70,7 @@ define void @trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
 ; FUNC-LABEL: @mul64_sext_c
 ; EG-DAG: MULLO_INT
 ; EG-DAG: MULHI_INT
-; SI-DAG: V_MUL_LO_I32
+; SI-DAG: S_MUL_I32
 ; SI-DAG: V_MUL_HI_I32
 define void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
 entry:
@@ -66,16 +80,120 @@ entry:
   ret void
 }
 
+; FUNC-LABEL: @v_mul64_sext_c:
+; EG-DAG: MULLO_INT
+; EG-DAG: MULHI_INT
+; SI-DAG: V_MUL_LO_I32
+; SI-DAG: V_MUL_HI_I32
+; SI: S_ENDPGM
+define void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
+  %val = load i32 addrspace(1)* %in, align 4
+  %ext = sext i32 %val to i64
+  %mul = mul i64 %ext, 80
+  store i64 %mul, i64 addrspace(1)* %out, align 8
+  ret void
+}
+
+; FUNC-LABEL: @v_mul64_sext_inline_imm:
+; SI-DAG: V_MUL_LO_I32 v{{[0-9]+}}, 9, v{{[0-9]+}}
+; SI-DAG: V_MUL_HI_I32 v{{[0-9]+}}, 9, v{{[0-9]+}}
+; SI: S_ENDPGM
+define void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
+  %val = load i32 addrspace(1)* %in, align 4
+  %ext = sext i32 %val to i64
+  %mul = mul i64 %ext, 9
+  store i64 %mul, i64 addrspace(1)* %out, align 8
+  ret void
+}
+
+; FUNC-LABEL: @s_mul_i32:
+; SI: S_LOAD_DWORD [[SRC0:s[0-9]+]],
+; SI: S_LOAD_DWORD [[SRC1:s[0-9]+]],
+; SI: S_MUL_I32 [[SRESULT:s[0-9]+]], [[SRC0]], [[SRC1]]
+; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; SI: BUFFER_STORE_DWORD [[VRESULT]],
+; SI: S_ENDPGM
+define void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+  %mul = mul i32 %a, %b
+  store i32 %mul, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: @v_mul_i32
+; SI: V_MUL_LO_I32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
+  %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+  %a = load i32 addrspace(1)* %in
+  %b = load i32 addrspace(1)* %b_ptr
+  %result = mul i32 %a, %b
+  store i32 %result, i32 addrspace(1)* %out
+  ret void
+}
+
 ; A standard 64-bit multiply. The expansion should be around 6 instructions.
 ; It would be difficult to match the expansion correctly without writing
 ; a really complicated list of FileCheck expressions. I don't want
 ; to confuse people who may 'break' this test with a correct optimization,
 ; so this test just uses FUNC-LABEL to make sure the compiler does not
 ; crash with a 'failed to select' error.
-; FUNC-LABEL: @mul64
-define void @mul64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
-entry:
-  %0 = mul i64 %a, %b
-  store i64 %0, i64 addrspace(1)* %out
+
+; FUNC-LABEL: @s_mul_i64:
+define void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
+  %mul = mul i64 %a, %b
+  store i64 %mul, i64 addrspace(1)* %out, align 8
+  ret void
+}
+
+; FUNC-LABEL: @v_mul_i64
+; SI: V_MUL_LO_I32
+define void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
+  %a = load i64 addrspace(1)* %aptr, align 8
+  %b = load i64 addrspace(1)* %bptr, align 8
+  %mul = mul i64 %a, %b
+  store i64 %mul, i64 addrspace(1)* %out, align 8
+  ret void
+}
+
+; FUNC-LABEL: @mul32_in_branch
+; SI: V_MUL_LO_I32
+define void @mul32_in_branch(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b, i32 %c) {
+entry:
+  %0 = icmp eq i32 %a, 0
+  br i1 %0, label %if, label %else
+
+if:
+  %1 = load i32 addrspace(1)* %in
+  br label %endif
+
+else:
+  %2 = mul i32 %a, %b
+  br label %endif
+
+endif:
+  %3 = phi i32 [%1, %if], [%2, %else]
+  store i32 %3, i32 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: @mul64_in_branch
+; SI-DAG: V_MUL_LO_I32
+; SI-DAG: V_MUL_HI_U32
+; SI: S_ENDPGM
+define void @mul64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
+entry:
+  %0 = icmp eq i64 %a, 0
+  br i1 %0, label %if, label %else
+
+if:
+  %1 = load i64 addrspace(1)* %in
+  br label %endif
+
+else:
+  %2 = mul i64 %a, %b
+  br label %endif
+
+endif:
+  %3 = phi i64 [%1, %if], [%2, %else]
+  store i64 %3, i64 addrspace(1)* %out
   ret void
 }
diff --git a/test/CodeGen/R600/sign_extend.ll b/test/CodeGen/R600/sign_extend.ll
index e3bee507de6..dbc2320f21c 100644
--- a/test/CodeGen/R600/sign_extend.ll
+++ b/test/CodeGen/R600/sign_extend.ll
@@ -10,10 +10,10 @@ define void @s_sext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
   ret void
 }
 
-; SI-LABEL: @test:
-; SI: V_ASHR
+; SI-LABEL: @test_s_sext_i32_to_i64:
+; SI: S_ASHR_I32
 ; SI: S_ENDPGM
-define void @test(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) nounwind {
+define void @test_s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) nounwind {
 entry:
   %mul = mul i32 %a, %b
   %add = add i32 %mul, %c
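Note on how the change fits together (a simplified sketch assembled from the SIInstructions.td hunks above, not new code from the patch): TableGen gives patterns inside a "let AddedComplexity = 1" block priority over otherwise-equal patterns outside it, so moving the S_MUL_I32 definition into that block and attaching a mul pattern to it makes instruction selection prefer the scalar (SALU) multiply. The new getVALUOp() entry then lets the SALU-to-VALU legalization in SIInstrInfo::moveToVALU rewrite S_MUL_I32 into V_MUL_LO_I32 whenever an operand has to live in a VGPR, which is exactly what the s_* and v_* tests check. Schematically:

    // Preferred: SALU multiply; the mul pattern is attached to the
    // instruction definition inside the higher-priority block.
    let AddedComplexity = 1 in {
    def S_MUL_I32 : SOP2_32 <0x00000026, "S_MUL_I32",
      [(set i32:$dst, (mul i32:$src0, i32:$src1))]
    >;
    } // End AddedComplexity = 1

    // Fallback: VALU multiply. moveToVALU also produces V_MUL_LO_I32
    // directly through the new getVALUOp() mapping when operands are
    // in vector registers.
    def : Pat <
      (mul i32:$src0, i32:$src1),
      (V_MUL_LO_I32 $src0, $src1)
    >;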