From b6520ab6250ce09d9f89eac926bfb0aeaad9e426 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Tue, 6 Jan 2015 23:00:37 +0000
Subject: [PATCH] R600/SI: Add class intrinsic

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@225305 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/IR/IntrinsicsR600.td      |   3 +
 lib/Target/R600/AMDGPUISelDAGToDAG.cpp |  11 +
 lib/Target/R600/AMDGPUISelLowering.cpp |   5 +
 lib/Target/R600/AMDGPUISelLowering.h   |   1 +
 lib/Target/R600/AMDGPUInstrInfo.td     |   6 +
 lib/Target/R600/SIDefines.h            |  15 ++
 lib/Target/R600/SIInstrInfo.td         |  41 ++-
 lib/Target/R600/SIInstructions.td      |   8 +-
 test/CodeGen/R600/llvm.AMDGPU.class.ll | 335 +++++++++++++++++++++++++
 9 files changed, 420 insertions(+), 5 deletions(-)
 create mode 100644 test/CodeGen/R600/llvm.AMDGPU.class.ll

diff --git a/include/llvm/IR/IntrinsicsR600.td b/include/llvm/IR/IntrinsicsR600.td
index d99c42d5029..50556673822 100644
--- a/include/llvm/IR/IntrinsicsR600.td
+++ b/include/llvm/IR/IntrinsicsR600.td
@@ -76,6 +76,9 @@ def int_AMDGPU_rsq_clamped : GCCBuiltin<"__builtin_amdgpu_rsq_clamped">,
 def int_AMDGPU_ldexp : GCCBuiltin<"__builtin_amdgpu_ldexp">,
   Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
 
+def int_AMDGPU_class : GCCBuiltin<"__builtin_amdgpu_class">,
+  Intrinsic<[llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
+
 def int_AMDGPU_read_workdim : AMDGPUReadPreloadRegisterIntrinsic <
   "__builtin_amdgpu_read_workdim">;
 
diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index ad5a5417ee3..f78e52730e8 100644
--- a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -113,6 +113,9 @@ private:
   bool SelectVOP3Mods0Clamp(SDValue In, SDValue &Src, SDValue &SrcMods,
                             SDValue &Omod) const;
+  bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
+                                 SDValue &Clamp,
+                                 SDValue &Omod) const;
 
   SDNode *SelectADD_SUB_I64(SDNode *N);
   SDNode *SelectDIV_SCALE(SDNode *N);
@@ -1143,6 +1146,14 @@ bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp(SDValue In, SDValue &Src,
   return SelectVOP3Mods(In, Src, SrcMods);
 }
 
+bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
+                                                   SDValue &SrcMods,
+                                                   SDValue &Clamp,
+                                                   SDValue &Omod) const {
+  Clamp = Omod = CurDAG->getTargetConstant(0, MVT::i32);
+  return SelectVOP3Mods(In, Src, SrcMods);
+}
+
 void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
   const AMDGPUTargetLowering& Lowering =
       *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp
index a0f3b6b697b..2f23de0cb2e 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -1000,6 +1000,10 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     case AMDGPUIntrinsic::AMDGPU_brev:
       return DAG.getNode(AMDGPUISD::BREV, DL, VT, Op.getOperand(1));
 
+    case Intrinsic::AMDGPU_class:
+      return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
+                         Op.getOperand(1), Op.getOperand(2));
+
     case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
       return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
@@ -2516,6 +2520,7 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(RSQ_LEGACY)
   NODE_NAME_CASE(RSQ_CLAMPED)
   NODE_NAME_CASE(LDEXP)
+  NODE_NAME_CASE(FP_CLASS)
   NODE_NAME_CASE(DOT4)
   NODE_NAME_CASE(BFE_U32)
   NODE_NAME_CASE(BFE_I32)
diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/R600/AMDGPUISelLowering.h
index 64ec0245f98..6571c0b00a5 100644
--- a/lib/Target/R600/AMDGPUISelLowering.h
+++ b/lib/Target/R600/AMDGPUISelLowering.h
@@ -234,6 +234,7 @@ enum {
   RSQ_LEGACY,
   RSQ_CLAMPED,
   LDEXP,
+  FP_CLASS,
   DOT4,
   BFE_U32, // Extract range of bits with zero extension to 32-bits.
   BFE_I32, // Extract range of bits with sign extension to 32-bits.
diff --git a/lib/Target/R600/AMDGPUInstrInfo.td b/lib/Target/R600/AMDGPUInstrInfo.td
index 9d988b60de9..0e34392bd50 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.td
+++ b/lib/Target/R600/AMDGPUInstrInfo.td
@@ -27,6 +27,10 @@ def AMDGPULdExpOp : SDTypeProfile<1, 2,
   [SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisInt<2>]
 >;
 
+def AMDGPUFPClassOp : SDTypeProfile<1, 2,
+  [SDTCisInt<0>, SDTCisFP<1>, SDTCisInt<2>]
+>;
+
 def AMDGPUDivScaleOp : SDTypeProfile<2, 3,
   [SDTCisFP<0>, SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisSameAs<0, 4>]
 >;
@@ -58,6 +62,8 @@ def AMDGPUrsq_clamped : SDNode<"AMDGPUISD::RSQ_CLAMPED", SDTFPUnaryOp>;
 
 def AMDGPUldexp : SDNode<"AMDGPUISD::LDEXP", AMDGPULdExpOp>;
 
+def AMDGPUfp_class : SDNode<"AMDGPUISD::FP_CLASS", AMDGPUFPClassOp>;
+
 // out = max(a, b) a and b are floats, where a nan comparison fails.
 // This is not commutative because this gives the second operand:
 //   x < nan ? x : nan -> nan
diff --git a/lib/Target/R600/SIDefines.h b/lib/Target/R600/SIDefines.h
index 759ed1bba1a..82e9a859894 100644
--- a/lib/Target/R600/SIDefines.h
+++ b/lib/Target/R600/SIDefines.h
@@ -44,6 +44,21 @@ namespace SIInstrFlags {
     EXP_CNT = 1 << 1,
     LGKM_CNT = 1 << 2
   };
+
+  // v_cmp_class_* etc. use a 10-bit mask for what operation is checked.
+  // The result is true if any of these tests are true.
+  enum ClassFlags {
+    S_NAN = 1 << 0,        // Signaling NaN
+    Q_NAN = 1 << 1,        // Quiet NaN
+    N_INFINITY = 1 << 2,   // Negative infinity
+    N_NORMAL = 1 << 3,     // Negative normal
+    N_SUBNORMAL = 1 << 4,  // Negative subnormal
+    N_ZERO = 1 << 5,       // Negative zero
+    P_ZERO = 1 << 6,       // Positive zero
+    P_SUBNORMAL = 1 << 7,  // Positive subnormal
+    P_NORMAL = 1 << 8,     // Positive normal
+    P_INFINITY = 1 << 9    // Positive infinity
+  };
 }
 
 namespace SISrcMods {
diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td
index 0471c6dbf4c..c74ad83b9e5 100644
--- a/lib/Target/R600/SIInstrInfo.td
+++ b/lib/Target/R600/SIInstrInfo.td
@@ -283,6 +283,7 @@ def MUBUFOffsetAtomic : ComplexPattern<i64, 4, "SelectMUBUFOffset">;
 
 def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">;
 def VOP3Mods0Clamp : ComplexPattern<untyped, 3, "SelectVOP3Mods0Clamp">;
+def VOP3Mods0Clamp0OMod : ComplexPattern<untyped, 4, "SelectVOP3Mods0Clamp0OMod">;
 def VOP3Mods : ComplexPattern<untyped, 2, "SelectVOP3Mods">;
 
 //===----------------------------------------------------------------------===//
@@ -611,7 +612,9 @@ class getNumSrcArgs<ValueType Src1, ValueType Src2> {
 
 // Returns the register class to use for the destination of VOP[123C]
 // instructions for the given VT.
 class getVALUDstForVT<ValueType VT> {
-  RegisterClass ret = !if(!eq(VT.Size, 32), VReg_32, VReg_64);
+  RegisterClass ret = !if(!eq(VT.Size, 32), VReg_32,
+                        !if(!eq(VT.Size, 64), VReg_64,
+                          SReg_64)); // else VT == i1
 }
 
 // Returns the register class to use for source 0 of VOP[12C]
@@ -775,6 +778,17 @@ def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;
 def VOP_I32_I32_I32_VCC : VOPProfile <[i32, i32, i32, untyped]> {
   let Src0RC32 = VCSrc_32;
 }
+
+def VOP_I1_F32_I32 : VOPProfile <[i1, f32, i32, untyped]> {
+  let Ins64 = (ins InputModsNoDefault:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
+  let Asm64 = " $dst, $src0_modifiers, $src1";
+}
+
+def VOP_I1_F64_I32 : VOPProfile <[i1, f64, i32, untyped]> {
+  let Ins64 = (ins InputModsNoDefault:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
+  let Asm64 = " $dst, $src0_modifiers, $src1";
+}
+
 def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>;
 def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>;
 
@@ -1111,6 +1125,19 @@ multiclass VOPCInst <vopc op, string opName, VOPProfile P,
 >;
 
+multiclass VOPCClassInst <vopc op, string opName, VOPProfile P,
+                          bit DefExec> : VOPC_Helper <
+  op, opName,
+  P.Ins32, P.Asm32, [],
+  (outs SReg_64:$dst), P.Ins64, P.Asm64,
+  !if(P.HasModifiers,
+      [(set i1:$dst,
+          (AMDGPUfp_class (P.Src0VT (VOP3Mods0Clamp0OMod P.Src0VT:$src0, i32:$src0_modifiers)), P.Src1VT:$src1))],
+      [(set i1:$dst, (AMDGPUfp_class P.Src0VT:$src0, P.Src1VT:$src1))]),
+  P.HasModifiers, DefExec
+>;
+
+
 multiclass VOPC_F32 <vopc op, string opName, PatLeaf cond = COND_NULL> :
   VOPCInst <op, opName, VOP_F32_F32_F32, cond>;
 
@@ -1145,6 +1172,18 @@ multiclass VOP3_Helper <vop3 op, string opName, dag outs,
 >;
 
+multiclass VOPC_CLASS_F32 <vopc op, string opName> :
+  VOPCClassInst <op, opName, VOP_I1_F32_I32, 0>;
+
+multiclass VOPCX_CLASS_F32 <vopc op, string opName> :
+  VOPCClassInst <op, opName, VOP_I1_F32_I32, 1>;
+
+multiclass VOPC_CLASS_F64 <vopc op, string opName> :
+  VOPCClassInst <op, opName, VOP_I1_F64_I32, 0>;
+
+multiclass VOPCX_CLASS_F64 <vopc op, string opName> :
+  VOPCClassInst <op, opName, VOP_I1_F64_I32, 1>;
+
 multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
                      SDPatternOperator node = null_frag> : VOP3_Helper <
   op, opName, P.Outs, P.Ins64, P.Asm64,
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 463287eaa97..c130a7625ce 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -745,16 +745,16 @@ defm V_CMPX_T_U64 : VOPCX_I64 <vopc<0xf7>, "v_cmpx_t_u64">;
 
 } // End hasSideEffects = 1
 
-defm V_CMP_CLASS_F32 : VOPC_F32 <vopc<0x88>, "v_cmp_class_f32">;
+defm V_CMP_CLASS_F32 : VOPC_CLASS_F32 <vopc<0x88>, "v_cmp_class_f32">;
 
 let hasSideEffects = 1 in {
-defm V_CMPX_CLASS_F32 : VOPCX_F32 <vopc<0x98>, "v_cmpx_class_f32">;
+defm V_CMPX_CLASS_F32 : VOPCX_CLASS_F32 <vopc<0x98>, "v_cmpx_class_f32">;
 } // End hasSideEffects = 1
 
-defm V_CMP_CLASS_F64 : VOPC_F64 <vopc<0xa8>, "v_cmp_class_f64">;
+defm V_CMP_CLASS_F64 : VOPC_CLASS_F64 <vopc<0xa8>, "v_cmp_class_f64">;
 
 let hasSideEffects = 1 in {
-defm V_CMPX_CLASS_F64 : VOPCX_F64 <vopc<0xb8>, "v_cmpx_class_f64">;
+defm V_CMPX_CLASS_F64 : VOPCX_CLASS_F64 <vopc<0xb8>, "v_cmpx_class_f64">;
 } // End hasSideEffects = 1
 
 } // End isCompare = 1
diff --git a/test/CodeGen/R600/llvm.AMDGPU.class.ll b/test/CodeGen/R600/llvm.AMDGPU.class.ll
new file mode 100644
index 00000000000..f27cec1d7b9
--- /dev/null
+++ b/test/CodeGen/R600/llvm.AMDGPU.class.ll
@@ -0,0 +1,335 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+declare i1 @llvm.AMDGPU.class.f32(float, i32) #1
+declare i1 @llvm.AMDGPU.class.f64(double, i32) #1
+declare i32 @llvm.r600.read.tidig.x() #1
+declare float @llvm.fabs.f32(float) #1
+declare double @llvm.fabs.f64(double) #1
+
+; SI-LABEL: {{^}}test_class_f32:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_fabs_f32:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+  %a.fabs = call float @llvm.fabs.f32(float %a) #1
+  %result = call i1 @llvm.AMDGPU.class.f32(float %a.fabs, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_fneg_f32:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -[[SA]], [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fneg_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+  %a.fneg = fsub float -0.0, %a
+  %result = call i1 @llvm.AMDGPU.class.f32(float %a.fneg, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_fneg_fabs_f32:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fneg_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
+  %a.fabs = call float @llvm.fabs.f32(float %a) #1
+  %a.fneg.fabs = fsub float -0.0, %a.fabs
+  %result = call i1 @llvm.AMDGPU.class.f32(float %a.fneg.fabs, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_1_f32:
+; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: v_cmp_class_f32_e64 [[COND:s\[[0-9]+:[0-9]+\]]], [[SA]], 1{{$}}
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_1_f32(i32 addrspace(1)* %out, float %a) #0 {
+  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_64_f32:
+; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: v_cmp_class_f32_e64 [[COND:s\[[0-9]+:[0-9]+\]]], [[SA]], 64{{$}}
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_64_f32(i32 addrspace(1)* %out, float %a) #0 {
+  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 64) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; Set all 10 bits of mask
+; SI-LABEL: {{^}}test_class_full_mask_f32:
+; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x3ff{{$}}
+; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[MASK]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_full_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
+  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1023) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_9bit_mask_f32:
+; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
+; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[MASK]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_9bit_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
+  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}v_test_class_full_mask_f32:
+; SI-DAG: buffer_load_dword [[VA:v[0-9]+]]
+; SI-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
+; SI: v_cmp_class_f32_e32 vcc, [[VA]], [[MASK]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @v_test_class_full_mask_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.r600.read.tidig.x() #1
+  %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
+  %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+  %a = load float addrspace(1)* %gep.in
+
+  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_inline_imm_constant_dynamic_mask_f32:
+; SI-DAG: buffer_load_dword [[VB:v[0-9]+]]
+; SI: v_cmp_class_f32_e32 vcc, 1.0, [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_inline_imm_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.r600.read.tidig.x() #1
+  %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
+  %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+  %b = load i32 addrspace(1)* %gep.in
+
+  %result = call i1 @llvm.AMDGPU.class.f32(float 1.0, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+  ret void
+}
+
+; FIXME: Why isn't this using a literal constant operand?
+; SI-LABEL: {{^}}test_class_lit_constant_dynamic_mask_f32:
+; SI-DAG: buffer_load_dword [[VB:v[0-9]+]]
+; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
+; SI: v_cmp_class_f32_e32 vcc, [[VK]], [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_lit_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.r600.read.tidig.x() #1
+  %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
+  %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+  %b = load i32 addrspace(1)* %gep.in
+
+  %result = call i1 @llvm.AMDGPU.class.f32(float 1024.0, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_f64:
+; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f64_e32 vcc, [[SA]], [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_fabs_f64:
+; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+  %a.fabs = call double @llvm.fabs.f64(double %a) #1
+  %result = call i1 @llvm.AMDGPU.class.f64(double %a.fabs, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_fneg_f64:
+; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -[[SA]], [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fneg_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+  %a.fneg = fsub double -0.0, %a
+  %result = call i1 @llvm.AMDGPU.class.f64(double %a.fneg, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_fneg_fabs_f64:
+; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
+; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_fneg_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
+  %a.fabs = call double @llvm.fabs.f64(double %a) #1
+  %a.fneg.fabs = fsub double -0.0, %a.fabs
+  %result = call i1 @llvm.AMDGPU.class.f64(double %a.fneg.fabs, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_1_f64:
+; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 1{{$}}
+; SI: s_endpgm
+define void @test_class_1_f64(i32 addrspace(1)* %out, double %a) #0 {
+  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 1) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_64_f64:
+; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 64{{$}}
+; SI: s_endpgm
+define void @test_class_64_f64(i32 addrspace(1)* %out, double %a) #0 {
+  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 64) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; Set all 9 bits of mask
+; SI-LABEL: {{^}}test_class_full_mask_f64:
+; SI: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
+; SI: v_cmp_class_f64_e32 vcc, [[SA]], [[MASK]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI-NEXT: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @test_class_full_mask_f64(i32 addrspace(1)* %out, double %a) #0 {
+  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}v_test_class_full_mask_f64:
+; SI-DAG: buffer_load_dwordx2 [[VA:v\[[0-9]+:[0-9]+\]]]
+; SI-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
+; SI: v_cmp_class_f64_e32 vcc, [[VA]], [[MASK]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
+; SI: buffer_store_dword [[RESULT]]
+; SI: s_endpgm
+define void @v_test_class_full_mask_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.r600.read.tidig.x() #1
+  %gep.in = getelementptr double addrspace(1)* %in, i32 %tid
+  %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+  %a = load double addrspace(1)* %in
+
+  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_inline_imm_constant_dynamic_mask_f64:
+; XSI: v_cmp_class_f64_e32 vcc, 1.0,
+; SI: v_cmp_class_f64_e32 vcc,
+; SI: s_endpgm
+define void @test_class_inline_imm_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.r600.read.tidig.x() #1
+  %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
+  %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+  %b = load i32 addrspace(1)* %gep.in
+
+  %result = call i1 @llvm.AMDGPU.class.f64(double 1.0, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}test_class_lit_constant_dynamic_mask_f64:
+; SI: v_cmp_class_f64_e32 vcc, s{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}
+; SI: s_endpgm
+define void @test_class_lit_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.r600.read.tidig.x() #1
+  %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
+  %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+  %b = load i32 addrspace(1)* %gep.in
+
+  %result = call i1 @llvm.AMDGPU.class.f64(double 1024.0, i32 %b) #1
+  %sext = sext i1 %result to i32
+  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
+  ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
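--
Usage sketch (illustrative, not part of the patch): the second operand of the
intrinsic is a bitwise OR of the SIInstrFlags::ClassFlags values added in
SIDefines.h, and the result is true if the input falls in any of the selected
classes, so a test for either kind of NaN passes S_NAN | Q_NAN = 3. The
function name below is hypothetical.

; Returns -1 if %x is any kind of NaN (signaling or quiet), else 0.
; Expected to select to v_cmp_class_f32 followed by v_cndmask_b32.
define i32 @is_nan_f32(float %x) nounwind {
  ; mask 3 = S_NAN (1 << 0) | Q_NAN (1 << 1)
  %is.nan = call i1 @llvm.AMDGPU.class.f32(float %x, i32 3)
  %r = sext i1 %is.nan to i32
  ret i32 %r
}

declare i1 @llvm.AMDGPU.class.f32(float, i32) nounwind readnone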