diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index 77fce3848fc..7341cd97e61 100644
--- a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -121,6 +121,11 @@ private:
   SDNode *SelectADD_SUB_I64(SDNode *N);
   SDNode *SelectDIV_SCALE(SDNode *N);
 
+  SDNode *getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
+                   uint32_t Offset, uint32_t Width);
+  SDNode *SelectS_BFEFromShifts(SDNode *N);
+  SDNode *SelectS_BFE(SDNode *N);
+
   // Include the pieces autogenerated from the target description.
 #include "AMDGPUGenDAGISel.inc"
 };
@@ -520,21 +525,11 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
 
     bool Signed = Opc == AMDGPUISD::BFE_I32;
 
-    // Transformation function, pack the offset and width of a BFE into
-    // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
-    // source, bits [5:0] contain the offset and bits [22:16] the width.
-
     uint32_t OffsetVal = Offset->getZExtValue();
     uint32_t WidthVal = Width->getZExtValue();
 
-    uint32_t PackedVal = OffsetVal | WidthVal << 16;
-
-    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
-    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
-                                  SDLoc(N),
-                                  MVT::i32,
-                                  N->getOperand(0),
-                                  PackedOffsetWidth);
+    return getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32, SDLoc(N),
+                    N->getOperand(0), OffsetVal, WidthVal);
   }
   case AMDGPUISD::DIV_SCALE: {
@@ -548,6 +543,14 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
   }
   case ISD::ADDRSPACECAST:
     return SelectAddrSpaceCast(N);
+  case ISD::AND:
+  case ISD::SRL:
+  case ISD::SRA:
+    if (N->getValueType(0) != MVT::i32 ||
+        Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
+      break;
+
+    return SelectS_BFE(N);
   }
 
   return SelectCode(N);
@@ -1150,6 +1153,95 @@ SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
   return CurDAG->getNode(ISD::BITCAST, DL, DestVT, Src).getNode();
 }
 
+SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
+                                     uint32_t Offset, uint32_t Width) {
+  // Transformation function, pack the offset and width of a BFE into
+  // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
+  // source, bits [5:0] contain the offset and bits [22:16] the width.
+  uint32_t PackedVal = Offset | (Width << 16);
+  SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, MVT::i32);
+
+  return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
+}
+
+SDNode *AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
+  // "(a << b) srl c" ---> "BFE_U32 a, (c - b), (32 - c)"
+  // "(a << b) sra c" ---> "BFE_I32 a, (c - b), (32 - c)"
+  // Predicate: 0 < b <= c < 32
+
+  const SDValue &Shl = N->getOperand(0);
+  ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
+  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+
+  if (B && C) {
+    uint32_t BVal = B->getZExtValue();
+    uint32_t CVal = C->getZExtValue();
+
+    if (0 < BVal && BVal <= CVal && CVal < 32) {
+      bool Signed = N->getOpcode() == ISD::SRA;
+      unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
+
+      return getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0),
+                      CVal - BVal, 32 - CVal);
+    }
+  }
+  return SelectCode(N);
+}
+
+SDNode *AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
+  switch (N->getOpcode()) {
+  case ISD::AND:
+    if (N->getOperand(0).getOpcode() == ISD::SRL) {
+      // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
+      // Predicate: isMask(mask)
+      const SDValue &Srl = N->getOperand(0);
+      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
+      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
+
+      if (Shift && Mask) {
+        uint32_t ShiftVal = Shift->getZExtValue();
+        uint32_t MaskVal = Mask->getZExtValue();
+
+        if (isMask_32(MaskVal)) {
+          uint32_t WidthVal = countPopulation(MaskVal);
+
+          return getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), Srl.getOperand(0),
+                          ShiftVal, WidthVal);
+        }
+      }
+    }
+    break;
+  case ISD::SRL:
+    if (N->getOperand(0).getOpcode() == ISD::AND) {
+      // "(a & mask) srl b" ---> "BFE_U32 a, b, popcount(mask >> b)"
+      // Predicate: isMask(mask >> b)
+      const SDValue &And = N->getOperand(0);
+      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
+      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));
+
+      if (Shift && Mask) {
+        uint32_t ShiftVal = Shift->getZExtValue();
+        uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;
+
+        if (isMask_32(MaskVal)) {
+          uint32_t WidthVal = countPopulation(MaskVal);
+
+          return getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), And.getOperand(0),
+                          ShiftVal, WidthVal);
+        }
+      }
+    } else if (N->getOperand(0).getOpcode() == ISD::SHL)
+      return SelectS_BFEFromShifts(N);
+    break;
+  case ISD::SRA:
+    if (N->getOperand(0).getOpcode() == ISD::SHL)
+      return SelectS_BFEFromShifts(N);
+    break;
+  }
+
+  return SelectCode(N);
+}
+
 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                         SDValue &SrcMods) const {
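The helper's packed source-2 operand can be sanity-checked in isolation. Below is a minimal standalone sketch of just the packing arithmetic (illustrative only, not part of the patch); the expected constants are exactly the s_bfe immediates pinned down by the tests that follow.

  #include <cassert>
  #include <cstdint>

  // Model of the S_BFE_I32 / S_BFE_U32 second source operand:
  // bits [5:0] hold the bit offset, bits [22:16] hold the field width.
  static uint32_t packBFE(uint32_t Offset, uint32_t Width) {
    return Offset | (Width << 16);
  }

  int main() {
    assert(packBFE(6, 3) == 0x30006);   // lshr_and / and_lshr / and_lshr2
    assert(packBFE(2, 21) == 0x150002); // shl_lshr
    assert(packBFE(1, 25) == 0x190001); // sext_in_reg_i1_in_i32_other_amount
    return 0;
  }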
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll b/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
index ffd3d6c7299..1168713ca66 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
@@ -96,7 +96,6 @@ define void @bfe_i32_test_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounw
   ret void
 }
 
-; FIXME: The shifts should be 1 BFE
 ; FUNC-LABEL: {{^}}bfe_i32_test_8:
 ; SI: buffer_load_dword
 ; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 1
@@ -407,16 +406,12 @@ define void @bfe_i32_constant_fold_test_18(i32 addrspace(1)* %out) nounwind {
   ret void
 }
 
-; XXX - This should really be a single BFE, but the sext_inreg of the
-; extended type i24 is never custom lowered.
 ; FUNC-LABEL: {{^}}bfe_sext_in_reg_i24:
 ; SI: buffer_load_dword [[LOAD:v[0-9]+]],
-; SI: v_lshlrev_b32_e32 {{v[0-9]+}}, 8, {{v[0-9]+}}
-; SI: v_ashrrev_i32_e32 {{v[0-9]+}}, 8, {{v[0-9]+}}
-; XSI: v_bfe_i32 [[BFE:v[0-9]+]], [[LOAD]], 0, 8
-; XSI-NOT: SHL
-; XSI-NOT: SHR
-; XSI: buffer_store_dword [[BFE]],
+; SI-NOT: v_lshl
+; SI-NOT: v_ashr
+; SI: v_bfe_i32 [[BFE:v[0-9]+]], [[LOAD]], 0, 24
+; SI: buffer_store_dword [[BFE]],
 define void @bfe_sext_in_reg_i24(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
   %x = load i32, i32 addrspace(1)* %in, align 4
   %bfe = call i32 @llvm.AMDGPU.bfe.i32(i32 %x, i32 0, i32 24)
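The updated bfe_sext_in_reg_i24 checks rest on the SelectS_BFEFromShifts identity: for 0 < b <= c < 32, "(a << b) sra c" extracts the (32 - c)-bit field at offset (c - b) and sign-extends it. A minimal host-side spot check for the i24 case (b = c = 8) is sketched below; bfeI32 is an assumed reference model of the BFE_I32 semantics, not code from the patch, and the check assumes >> on a negative int32_t is an arithmetic shift (true on the relevant hosts, guaranteed since C++20).

  #include <cassert>
  #include <cstdint>
  #include <initializer_list>

  // Assumed reference model: extract Width bits (1 <= Width <= 31) of Val
  // starting at bit Offset, then sign-extend the field to 32 bits.
  static int32_t bfeI32(int32_t Val, uint32_t Offset, uint32_t Width) {
    uint32_t Field = ((uint32_t)Val >> Offset) & ((1u << Width) - 1);
    uint32_t Sign = 1u << (Width - 1);
    return (int32_t)((Field ^ Sign) - Sign);
  }

  int main() {
    for (int32_t A : {0, 1, -1, 0x00812345, 0x7fffffff}) {
      // shl 8 then ashr 8; the left shift is done in unsigned arithmetic
      // to keep the host-side computation well defined.
      int32_t Shifts = (int32_t)((uint32_t)A << 8) >> 8;
      assert(Shifts == bfeI32(A, 0, 24)); // a single BFE_I32 a, 0, 24
    }
    return 0;
  }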
diff --git a/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll b/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
index 83bdb150ef5..0426b3ab603 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
@@ -439,7 +439,7 @@ define void @bfe_u32_constant_fold_test_8(i32 addrspace(1)* %out) nounwind {
 ; SI: v_mov_b32_e32 [[VREG:v[0-9]+]], 1
 ; SI: buffer_store_dword [[VREG]],
 ; SI: s_endpgm
-; EG-NOT: BFEfppppppppppppp
+; EG-NOT: BFE
 define void @bfe_u32_constant_fold_test_9(i32 addrspace(1)* %out) nounwind {
   %bfe_u32 = call i32 @llvm.AMDGPU.bfe.u32(i32 65536, i32 16, i32 8) nounwind readnone
   store i32 %bfe_u32, i32 addrspace(1)* %out, align 4
@@ -575,3 +575,43 @@ define void @simplify_bfe_u32_multi_use_arg(i32 addrspace(1)* %out0,
   store i32 %and, i32 addrspace(1)* %out1, align 4
   ret void
 }
+
+; FUNC-LABEL: {{^}}lshr_and:
+; SI: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x30006
+; SI: buffer_store_dword
+define void @lshr_and(i32 addrspace(1)* %out, i32 %a) nounwind {
+  %b = lshr i32 %a, 6
+  %c = and i32 %b, 7
+  store i32 %c, i32 addrspace(1)* %out, align 8
+  ret void
+}
+
+; FUNC-LABEL: {{^}}and_lshr:
+; SI: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x30006
+; SI: buffer_store_dword
+define void @and_lshr(i32 addrspace(1)* %out, i32 %a) nounwind {
+  %b = and i32 %a, 448
+  %c = lshr i32 %b, 6
+  store i32 %c, i32 addrspace(1)* %out, align 8
+  ret void
+}
+
+; FUNC-LABEL: {{^}}and_lshr2:
+; SI: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x30006
+; SI: buffer_store_dword
+define void @and_lshr2(i32 addrspace(1)* %out, i32 %a) nounwind {
+  %b = and i32 %a, 511
+  %c = lshr i32 %b, 6
+  store i32 %c, i32 addrspace(1)* %out, align 8
+  ret void
+}
+
+; FUNC-LABEL: {{^}}shl_lshr:
+; SI: s_bfe_u32 {{s[0-9]+}}, {{s[0-9]+}}, 0x150002
+; SI: buffer_store_dword
+define void @shl_lshr(i32 addrspace(1)* %out, i32 %a) nounwind {
+  %b = shl i32 %a, 9
+  %c = lshr i32 %b, 11
+  store i32 %c, i32 addrspace(1)* %out, align 8
+  ret void
+}
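The three unsigned patterns covered by the new tests can be spot-checked the same way. This is a minimal sketch against an assumed reference model of the BFE_U32 semantics (bfeU32 below is illustrative, not code from the patch); the offset/width pairs are the ones packed into the 0x30006 and 0x150002 immediates above.

  #include <cassert>
  #include <cstdint>

  // Assumed reference model: extract Width bits (1 <= Width <= 31) of Val
  // starting at bit Offset, zero-extended.
  static uint32_t bfeU32(uint32_t Val, uint32_t Offset, uint32_t Width) {
    return (Val >> Offset) & ((1u << Width) - 1);
  }

  int main() {
    uint32_t A = 0xdeadbeef;
    // lshr_and:  (a >> 6) & 7    ->  BFE_U32 a, 6, 3   (7 is a 3-bit mask)
    assert(((A >> 6) & 7u) == bfeU32(A, 6, 3));
    // and_lshr:  (a & 448) >> 6  ->  BFE_U32 a, 6, 3   (448 >> 6 == 7)
    // and_lshr2 folds the same way, since 511 >> 6 == 7 as well.
    assert(((A & 448u) >> 6) == bfeU32(A, 6, 3));
    assert(((A & 511u) >> 6) == bfeU32(A, 6, 3));
    // shl_lshr:  (a << 9) >> 11  ->  BFE_U32 a, 2, 21  (c - b = 2, 32 - c = 21)
    assert(((A << 9) >> 11) == bfeU32(A, 2, 21));
    return 0;
  }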
diff --git a/test/CodeGen/R600/sext-in-reg.ll b/test/CodeGen/R600/sext-in-reg.ll
index c637bec80a8..d9ad4935968 100644
--- a/test/CodeGen/R600/sext-in-reg.ll
+++ b/test/CodeGen/R600/sext-in-reg.ll
@@ -263,9 +263,9 @@ define void @v_sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)*
 }
 
 ; FUNC-LABEL: {{^}}sext_in_reg_i1_in_i32_other_amount:
-; SI-NOT: {{[^@]}}bfe
-; SI: s_lshl_b32 [[REG:s[0-9]+]], {{s[0-9]+}}, 6
-; SI: s_ashr_i32 {{s[0-9]+}}, [[REG]], 7
+; SI-NOT: s_lshl
+; SI-NOT: s_ashr
+; SI: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x190001
 
 ; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+\.[XYZW]]], [[ADDR:T[0-9]+.[XYZW]]]
 ; EG-NOT: BFE
@@ -282,10 +282,10 @@ define void @sext_in_reg_i1_in_i32_other_amount(i32 addrspace(1)* %out, i32 %a,
 }
 
 ; FUNC-LABEL: {{^}}sext_in_reg_v2i1_in_v2i32_other_amount:
-; SI-DAG: s_lshl_b32 [[REG0:s[0-9]+]], {{s[0-9]}}, 6
-; SI-DAG: s_ashr_i32 {{s[0-9]+}}, [[REG0]], 7
-; SI-DAG: s_lshl_b32 [[REG1:s[0-9]+]], {{s[0-9]}}, 6
-; SI-DAG: s_ashr_i32 {{s[0-9]+}}, [[REG1]], 7
+; SI-NOT: s_lshl
+; SI-NOT: s_ashr
+; SI-DAG: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x190001
+; SI-DAG: s_bfe_i32 {{s[0-9]+}}, {{s[0-9]+}}, 0x190001
 ; SI: s_endpgm
 
 ; EG: MEM_{{.*}} STORE_{{.*}} [[RES:T[0-9]+]]{{\.[XYZW][XYZW]}}, [[ADDR:T[0-9]+.[XYZW]]]
@@ -599,8 +599,9 @@ define void @sext_in_reg_i1_bfe_offset_1(i32 addrspace(1)* %out, i32 addrspace(1
 
 ; FUNC-LABEL: {{^}}sext_in_reg_i2_bfe_offset_1:
 ; SI: buffer_load_dword
-; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, 30, v{{[0-9]+}}
-; SI: v_ashrrev_i32_e32 v{{[0-9]+}}, 30, v{{[0-9]+}}
+; SI-NOT: v_lshl
+; SI-NOT: v_ashr
+; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 2
 ; SI: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 1, 2
 ; SI: s_endpgm
 define void @sext_in_reg_i2_bfe_offset_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
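For completeness, the 0x190001 immediate checked above decodes to offset 1, width 25: "(a shl 6) sra 7" keeps bits [25:1] of a and sign-extends from bit 25, which is what SelectS_BFEFromShifts emits for this sext_in_reg pattern. A host-side spot check under the same assumed BFE_I32 reference model as before (illustrative, not part of the patch):

  #include <cassert>
  #include <cstdint>
  #include <initializer_list>

  // Assumed reference model of BFE_I32, as in the earlier sketch.
  static int32_t bfeI32(int32_t Val, uint32_t Offset, uint32_t Width) {
    uint32_t Field = ((uint32_t)Val >> Offset) & ((1u << Width) - 1);
    uint32_t Sign = 1u << (Width - 1);
    return (int32_t)((Field ^ Sign) - Sign);
  }

  int main() {
    for (int32_t A : {0, 3, -3, 0x02000001, 0x7fffffff}) {
      // s_lshl_b32 by 6, then s_ashr_i32 by 7 (b = 6, c = 7).
      int32_t Shifts = (int32_t)((uint32_t)A << 6) >> 7;
      assert(Shifts == bfeI32(A, 1, 25)); // s_bfe_i32 a, 0x190001
    }
    return 0;
  }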