R600/SI: Add class intrinsic

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@225305 91177308-0d34-0410-b5e6-96231b3b80d8
Matt Arsenault 2015-01-06 23:00:37 +00:00
parent 374b57cec9
commit b6520ab625
9 changed files with 420 additions and 5 deletions

View File

@@ -76,6 +76,9 @@ def int_AMDGPU_rsq_clamped : GCCBuiltin<"__builtin_amdgpu_rsq_clamped">,
def int_AMDGPU_ldexp : GCCBuiltin<"__builtin_amdgpu_ldexp">,
  Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_class : GCCBuiltin<"__builtin_amdgpu_class">,
  Intrinsic<[llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
def int_AMDGPU_read_workdim : AMDGPUReadPreloadRegisterIntrinsic <
  "__builtin_amdgpu_read_workdim">;

View File

@@ -113,6 +113,9 @@ private:
  bool SelectVOP3Mods0Clamp(SDValue In, SDValue &Src, SDValue &SrcMods,
                            SDValue &Omod) const;
  bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
                                 SDValue &Clamp,
                                 SDValue &Omod) const;
  SDNode *SelectADD_SUB_I64(SDNode *N);
  SDNode *SelectDIV_SCALE(SDNode *N);
@@ -1143,6 +1146,14 @@ bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp(SDValue In, SDValue &Src,
  return SelectVOP3Mods(In, Src, SrcMods);
}
bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
                                                   SDValue &SrcMods,
                                                   SDValue &Clamp,
                                                   SDValue &Omod) const {
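  // Only the source modifiers are selected here; clamp and omod default to 0.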
  Clamp = Omod = CurDAG->getTargetConstant(0, MVT::i32);
  return SelectVOP3Mods(In, Src, SrcMods);
}
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());

View File

@@ -1000,6 +1000,10 @@ SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
  case AMDGPUIntrinsic::AMDGPU_brev:
    return DAG.getNode(AMDGPUISD::BREV, DL, VT, Op.getOperand(1));
  case Intrinsic::AMDGPU_class:
    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
    return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
@@ -2516,6 +2520,7 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  NODE_NAME_CASE(RSQ_LEGACY)
  NODE_NAME_CASE(RSQ_CLAMPED)
  NODE_NAME_CASE(LDEXP)
  NODE_NAME_CASE(FP_CLASS)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)

View File

@@ -234,6 +234,7 @@ enum {
  RSQ_LEGACY,
  RSQ_CLAMPED,
  LDEXP,
  FP_CLASS,
  DOT4,
  BFE_U32, // Extract range of bits with zero extension to 32-bits.
  BFE_I32, // Extract range of bits with sign extension to 32-bits.

View File

@@ -27,6 +27,10 @@ def AMDGPULdExpOp : SDTypeProfile<1, 2,
  [SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisInt<2>]
>;
def AMDGPUFPClassOp : SDTypeProfile<1, 2,
  [SDTCisInt<0>, SDTCisFP<1>, SDTCisInt<2>]
>;
def AMDGPUDivScaleOp : SDTypeProfile<2, 3,
  [SDTCisFP<0>, SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisSameAs<0, 4>]
>;
@@ -58,6 +62,8 @@ def AMDGPUrsq_clamped : SDNode<"AMDGPUISD::RSQ_CLAMPED", SDTFPUnaryOp>;
def AMDGPUldexp : SDNode<"AMDGPUISD::LDEXP", AMDGPULdExpOp>;
def AMDGPUfp_class : SDNode<"AMDGPUISD::FP_CLASS", AMDGPUFPClassOp>;
// out = max(a, b) a and b are floats, where a nan comparison fails.
// This is not commutative because this gives the second operand:
// x < nan ? x : nan -> nan
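A small standalone C++ sketch, for illustration only (not backend code), of why this max is not commutative: every comparison involving a NaN is false, so the select always yields the second operand.

#include <cassert>
#include <cmath>

// Mirrors the select described above: out = a > b ? a : b.
static float maxLegacy(float a, float b) {
  return a > b ? a : b; // comparison with NaN is false -> returns b
}

int main() {
  float nan = std::nanf("");
  assert(std::isnan(maxLegacy(1.0f, nan))); // 1.0 > nan is false -> nan
  assert(maxLegacy(nan, 1.0f) == 1.0f);     // nan > 1.0 is false -> 1.0
  return 0;
}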

View File

@@ -44,6 +44,21 @@ namespace SIInstrFlags {
  EXP_CNT = 1 << 1,
  LGKM_CNT = 1 << 2
};
// v_cmp_class_* etc. use a 10-bit mask for what operation is checked.
// The result is true if any of these tests are true.
enum ClassFlags {
  S_NAN = 1 << 0,        // Signaling NaN
  Q_NAN = 1 << 1,        // Quiet NaN
  N_INFINITY = 1 << 2,   // Negative infinity
  N_NORMAL = 1 << 3,     // Negative normal
  N_SUBNORMAL = 1 << 4,  // Negative subnormal
  N_ZERO = 1 << 5,       // Negative zero
  P_ZERO = 1 << 6,       // Positive zero
  P_SUBNORMAL = 1 << 7,  // Positive subnormal
  P_NORMAL = 1 << 8,     // Positive normal
  P_INFINITY = 1 << 9    // Positive infinity
};
}
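As a worked example (a standalone sketch, not part of the commit), the mask operand of the class intrinsic is just an OR of these bits; 0x3ff sets all ten and matches the full-mask literal in the tests below.

#include <cassert>
#include <cstdint>

// Mirror of the ClassFlags values above.
enum ClassFlags : uint32_t {
  S_NAN = 1 << 0,       Q_NAN = 1 << 1,
  N_INFINITY = 1 << 2,  N_NORMAL = 1 << 3,
  N_SUBNORMAL = 1 << 4, N_ZERO = 1 << 5,
  P_ZERO = 1 << 6,      P_SUBNORMAL = 1 << 7,
  P_NORMAL = 1 << 8,    P_INFINITY = 1 << 9
};

int main() {
  uint32_t AnyNaN = S_NAN | Q_NAN;    // 0x3: matches either NaN kind
  uint32_t AnyZero = N_ZERO | P_ZERO; // 0x60: matches -0.0 or +0.0
  uint32_t Full = (1u << 10) - 1;     // 0x3ff: all ten classes
  assert(AnyNaN == 0x3 && AnyZero == 0x60 && Full == 0x3ff);
  return 0;
}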
namespace SISrcMods {

View File

@@ -283,6 +283,7 @@ def MUBUFOffsetAtomic : ComplexPattern<i64, 4, "SelectMUBUFOffset">;
def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">;
def VOP3Mods0Clamp : ComplexPattern<untyped, 3, "SelectVOP3Mods0Clamp">;
def VOP3Mods0Clamp0OMod : ComplexPattern<untyped, 4, "SelectVOP3Mods0Clamp0OMod">;
def VOP3Mods : ComplexPattern<untyped, 2, "SelectVOP3Mods">;
//===----------------------------------------------------------------------===//
@@ -611,7 +612,9 @@ class getNumSrcArgs<ValueType Src1, ValueType Src2> {
// Returns the register class to use for the destination of VOP[123C]
// instructions for the given VT.
class getVALUDstForVT<ValueType VT> {
  RegisterClass ret = !if(!eq(VT.Size, 32), VReg_32, VReg_64);
  RegisterClass ret = !if(!eq(VT.Size, 32), VReg_32,
                        !if(!eq(VT.Size, 64), VReg_64,
                          SReg_64)); // else VT == i1
}
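A C++ rendering of the nested !if chain above (illustrative sketch only; the names are not the backend's API): 32-bit results go to a VGPR, 64-bit results to a VGPR pair, and i1 results to a 64-bit SGPR pair, since on SI an i1 is a per-lane condition mask.

enum class DstRC { VReg_32, VReg_64, SReg_64 };

// Same selection the nested !if performs on VT.Size.
static DstRC getVALUDstForSize(unsigned SizeInBits) {
  if (SizeInBits == 32)
    return DstRC::VReg_32;
  if (SizeInBits == 64)
    return DstRC::VReg_64;
  return DstRC::SReg_64; // i1: per-lane mask held in an SGPR pair
}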
// Returns the register class to use for source 0 of VOP[12C]
@@ -775,6 +778,17 @@ def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;
def VOP_I32_I32_I32_VCC : VOPProfile <[i32, i32, i32, untyped]> {
  let Src0RC32 = VCSrc_32;
}
def VOP_I1_F32_I32 : VOPProfile <[i1, f32, i32, untyped]> {
  let Ins64 = (ins InputModsNoDefault:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
  let Asm64 = " $dst, $src0_modifiers, $src1";
}
def VOP_I1_F64_I32 : VOPProfile <[i1, f64, i32, untyped]> {
  let Ins64 = (ins InputModsNoDefault:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
  let Asm64 = " $dst, $src0_modifiers, $src1";
}
def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>;
def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>;
@@ -1111,6 +1125,19 @@ multiclass VOPCInst <vopc op, string opName,
  P.HasModifiers, DefExec
>;
multiclass VOPCClassInst <vopc op, string opName, VOPProfile P,
                          bit DefExec = 0> : VOPC_Helper <
  op, opName,
  P.Ins32, P.Asm32, [],
  (outs SReg_64:$dst), P.Ins64, P.Asm64,
  !if(P.HasModifiers,
      [(set i1:$dst,
          (AMDGPUfp_class (P.Src0VT (VOP3Mods0Clamp0OMod P.Src0VT:$src0, i32:$src0_modifiers)), P.Src1VT:$src1))],
      [(set i1:$dst, (AMDGPUfp_class P.Src0VT:$src0, P.Src1VT:$src1))]),
  P.HasModifiers, DefExec
>;
multiclass VOPC_F32 <vopc op, string opName, PatLeaf cond = COND_NULL> :
  VOPCInst <op, opName, VOP_F32_F32_F32, cond>;
@@ -1145,6 +1172,18 @@ multiclass VOP3_Helper <vop3 op, string opName, dag outs, dag ins, string asm,
  op, outs, ins, opName#asm, pat, opName, NumSrcArgs, HasMods
>;
multiclass VOPC_CLASS_F32 <vopc op, string opName> :
  VOPCClassInst <op, opName, VOP_I1_F32_I32, 0>;
multiclass VOPCX_CLASS_F32 <vopc op, string opName> :
  VOPCClassInst <op, opName, VOP_I1_F32_I32, 1>;
multiclass VOPC_CLASS_F64 <vopc op, string opName> :
  VOPCClassInst <op, opName, VOP_I1_F64_I32, 0>;
multiclass VOPCX_CLASS_F64 <vopc op, string opName> :
  VOPCClassInst <op, opName, VOP_I1_F64_I32, 1>;
multiclass VOP3Inst <vop3 op, string opName, VOPProfile P,
                     SDPatternOperator node = null_frag> : VOP3_Helper <
  op, opName, P.Outs, P.Ins64, P.Asm64,

View File

@@ -745,16 +745,16 @@ defm V_CMPX_T_U64 : VOPCX_I64 <vopc<0xf7, 0xff>, "v_cmpx_t_u64">;
} // End hasSideEffects = 1
defm V_CMP_CLASS_F32 : VOPC_F32 <vopc<0x88, 0x10>, "v_cmp_class_f32">;
defm V_CMP_CLASS_F32 : VOPC_CLASS_F32 <vopc<0x88, 0x10>, "v_cmp_class_f32">;
let hasSideEffects = 1 in {
defm V_CMPX_CLASS_F32 : VOPCX_F32 <vopc<0x98, 0x11>, "v_cmpx_class_f32">;
defm V_CMPX_CLASS_F32 : VOPCX_CLASS_F32 <vopc<0x98, 0x11>, "v_cmpx_class_f32">;
} // End hasSideEffects = 1
defm V_CMP_CLASS_F64 : VOPC_F64 <vopc<0xa8, 0x12>, "v_cmp_class_f64">;
defm V_CMP_CLASS_F64 : VOPC_CLASS_F64 <vopc<0xa8, 0x12>, "v_cmp_class_f64">;
let hasSideEffects = 1 in {
defm V_CMPX_CLASS_F64 : VOPCX_F64 <vopc<0xb8, 0x13>, "v_cmpx_class_f64">;
defm V_CMPX_CLASS_F64 : VOPCX_CLASS_F64 <vopc<0xb8, 0x13>, "v_cmpx_class_f64">;
} // End hasSideEffects = 1
} // End isCompare = 1

View File

@@ -0,0 +1,335 @@
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
declare i1 @llvm.AMDGPU.class.f32(float, i32) #1
declare i1 @llvm.AMDGPU.class.f64(double, i32) #1
declare i32 @llvm.r600.read.tidig.x() #1
declare float @llvm.fabs.f32(float) #1
declare double @llvm.fabs.f64(double) #1
; SI-LABEL: {{^}}test_class_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
%result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_fabs_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
%a.fabs = call float @llvm.fabs.f32(float %a) #1
%result = call i1 @llvm.AMDGPU.class.f32(float %a.fabs, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_fneg_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -[[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fneg_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
%a.fneg = fsub float -0.0, %a
%result = call i1 @llvm.AMDGPU.class.f32(float %a.fneg, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_fneg_fabs_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fneg_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
%a.fabs = call float @llvm.fabs.f32(float %a) #1
%a.fneg.fabs = fsub float -0.0, %a.fabs
%result = call i1 @llvm.AMDGPU.class.f32(float %a.fneg.fabs, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_1_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_cmp_class_f32_e64 [[COND:s\[[0-9]+:[0-9]+\]]], [[SA]], 1{{$}}
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_1_f32(i32 addrspace(1)* %out, float %a) #0 {
%result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_64_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_cmp_class_f32_e64 [[COND:s\[[0-9]+:[0-9]+\]]], [[SA]], 64{{$}}
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_64_f32(i32 addrspace(1)* %out, float %a) #0 {
%result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 64) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; Set all 10 bits of mask
; SI-LABEL: {{^}}test_class_full_mask_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x3ff{{$}}
; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_full_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
%result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1023) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_9bit_mask_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_9bit_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
%result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}v_test_class_full_mask_f32:
; SI-DAG: buffer_load_dword [[VA:v[0-9]+]]
; SI-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f32_e32 vcc, [[VA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @v_test_class_full_mask_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.in = getelementptr float addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.in
%result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %gep.out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_inline_imm_constant_dynamic_mask_f32:
; SI-DAG: buffer_load_dword [[VB:v[0-9]+]]
; SI: v_cmp_class_f32_e32 vcc, 1.0, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_inline_imm_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
%b = load i32 addrspace(1)* %gep.in
%result = call i1 @llvm.AMDGPU.class.f32(float 1.0, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %gep.out, align 4
ret void
}
; FIXME: Why isn't this using a literal constant operand?
; SI-LABEL: {{^}}test_class_lit_constant_dynamic_mask_f32:
; SI-DAG: buffer_load_dword [[VB:v[0-9]+]]
; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
; SI: v_cmp_class_f32_e32 vcc, [[VK]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_lit_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
%b = load i32 addrspace(1)* %gep.in
%result = call i1 @llvm.AMDGPU.class.f32(float 1024.0, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %gep.out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e32 vcc, [[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
%result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_fabs_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
%a.fabs = call double @llvm.fabs.f64(double %a) #1
%result = call i1 @llvm.AMDGPU.class.f64(double %a.fabs, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_fneg_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -[[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fneg_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
%a.fneg = fsub double -0.0, %a
%result = call i1 @llvm.AMDGPU.class.f64(double %a.fneg, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_fneg_fabs_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fneg_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
%a.fabs = call double @llvm.fabs.f64(double %a) #1
%a.fneg.fabs = fsub double -0.0, %a.fabs
%result = call i1 @llvm.AMDGPU.class.f64(double %a.fneg.fabs, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_1_f64:
; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 1{{$}}
; SI: s_endpgm
define void @test_class_1_f64(i32 addrspace(1)* %out, double %a) #0 {
%result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 1) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_64_f64:
; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 64{{$}}
; SI: s_endpgm
define void @test_class_64_f64(i32 addrspace(1)* %out, double %a) #0 {
%result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 64) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; Set all 9 bits of mask
; SI-LABEL: {{^}}test_class_full_mask_f64:
; SI: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f64_e32 vcc, [[SA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_full_mask_f64(i32 addrspace(1)* %out, double %a) #0 {
%result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %out, align 4
ret void
}
; SI-LABEL: {{^}}v_test_class_full_mask_f64:
; SI-DAG: buffer_load_dwordx2 [[VA:v\[[0-9]+:[0-9]+\]]]
; SI-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f64_e32 vcc, [[VA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @v_test_class_full_mask_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.in = getelementptr double addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
%a = load double addrspace(1)* %gep.in
%result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %gep.out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_inline_imm_constant_dynamic_mask_f64:
; XSI: v_cmp_class_f64_e32 vcc, 1.0,
; SI: v_cmp_class_f64_e32 vcc,
; SI: s_endpgm
define void @test_class_inline_imm_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
%b = load i32 addrspace(1)* %gep.in
%result = call i1 @llvm.AMDGPU.class.f64(double 1.0, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %gep.out, align 4
ret void
}
; SI-LABEL: {{^}}test_class_lit_constant_dynamic_mask_f64:
; SI: v_cmp_class_f64_e32 vcc, s{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}
; SI: s_endpgm
define void @test_class_lit_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
%gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
%gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
%b = load i32 addrspace(1)* %gep.in
%result = call i1 @llvm.AMDGPU.class.f64(double 1024.0, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, i32 addrspace(1)* %gep.out, align 4
ret void
}
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }