From c882fc78fe2f3ffcee94918523e1de6e68efde4f Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Fri, 15 Aug 2014 18:42:15 +0000
Subject: [PATCH] R600/SI: Use source modifier for f64 fabs

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@215747 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/R600/AMDGPUISelLowering.cpp |  2 +-
 lib/Target/R600/SIISelLowering.cpp     | 30 +++++++-
 test/CodeGen/R600/fabs.f64.ll          | 97 ++++++++++++++++++++++++++
 3 files changed, 127 insertions(+), 2 deletions(-)
 create mode 100644 test/CodeGen/R600/fabs.f64.ll

diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp
index b30c9441524..6d7438399fe 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -441,7 +441,7 @@ bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
 
 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
   assert(VT.isFloatingPoint());
-  return VT == MVT::f32;
+  return VT == MVT::f32 || VT == MVT::f64;
 }
 
 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index 6d2e9575e59..508ed2a9a9a 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -226,7 +226,6 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
   // FIXME: These should be removed and handled the same was as f32 fneg. Source
   // modifiers also work for the double instructions.
   setOperationAction(ISD::FNEG, MVT::f64, Expand);
-  setOperationAction(ISD::FABS, MVT::f64, Expand);
 
   setOperationAction(ISD::FDIV, MVT::f32, Custom);
 
@@ -665,6 +664,35 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
     MI->eraseFromParent();
     break;
   }
+  case AMDGPU::FABS64_SI: {
+    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+    const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
+        getTargetMachine().getSubtargetImpl()->getInstrInfo());
+
+    DebugLoc DL = MI->getDebugLoc();
+    unsigned SuperReg = MI->getOperand(0).getReg();
+    unsigned SrcReg = MI->getOperand(1).getReg();
+
+    unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+
+    // Copy the subregister to make sure it is the right register class.
+    unsigned VReg = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+    BuildMI(*BB, I, DL, TII->get(AMDGPU::COPY), VReg)
+      .addReg(SrcReg, 0, AMDGPU::sub1);
+
+    // We only need to mask the upper half of the register pair.
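+    // The sign of an IEEE-754 double is bit 63, i.e. bit 31 of the sub1 half,
+    // so clearing it there is sufficient; sub0 is reassembled unchanged below.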
+    BuildMI(*BB, I, DL, TII->get(AMDGPU::V_AND_B32_e32), TmpReg)
+      .addImm(0x7fffffff)
+      .addReg(VReg);
+
+    BuildMI(*BB, I, DL, TII->get(AMDGPU::REG_SEQUENCE), SuperReg)
+      .addReg(SrcReg, 0, AMDGPU::sub0)
+      .addImm(AMDGPU::sub0)
+      .addReg(TmpReg)
+      .addImm(AMDGPU::sub1);
+    MI->eraseFromParent();
+    break;
+  }
   case AMDGPU::FNEG_SI: {
     MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
     const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
         getTargetMachine().getSubtargetImpl()->getInstrInfo());
diff --git a/test/CodeGen/R600/fabs.f64.ll b/test/CodeGen/R600/fabs.f64.ll
new file mode 100644
index 00000000000..1e1bdf4f7e9
--- /dev/null
+++ b/test/CodeGen/R600/fabs.f64.ll
@@ -0,0 +1,97 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+
+declare double @fabs(double) readnone
+declare double @llvm.fabs.f64(double) readnone
+declare <2 x double> @llvm.fabs.v2f64(<2 x double>) readnone
+declare <4 x double> @llvm.fabs.v4f64(<4 x double>) readnone
+
+; FUNC-LABEL: @v_fabs_f64
+; SI: V_AND_B32
+; SI: S_ENDPGM
+define void @v_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
+  %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
+  %tidext = sext i32 %tid to i64
+  %gep = getelementptr double addrspace(1)* %in, i64 %tidext
+  %val = load double addrspace(1)* %gep, align 8
+  %fabs = call double @llvm.fabs.f64(double %val)
+  store double %fabs, double addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: @fabs_f64
+; SI: V_AND_B32
+; SI-NOT: V_AND_B32
+; SI: S_ENDPGM
+define void @fabs_f64(double addrspace(1)* %out, double %in) {
+  %fabs = call double @llvm.fabs.f64(double %in)
+  store double %fabs, double addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: @fabs_v2f64
+; SI: V_AND_B32
+; SI: V_AND_B32
+; SI: S_ENDPGM
+define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+  %fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %in)
+  store <2 x double> %fabs, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: @fabs_v4f64
+; SI: V_AND_B32
+; SI: V_AND_B32
+; SI: V_AND_B32
+; SI: V_AND_B32
+; SI: S_ENDPGM
+define void @fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+  %fabs = call <4 x double> @llvm.fabs.v4f64(<4 x double> %in)
+  store <4 x double> %fabs, <4 x double> addrspace(1)* %out
+  ret void
+}
+
+; SI-LABEL: @fabs_fold_f64
+; SI: S_LOAD_DWORDX2 [[ABS_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-NOT: AND
+; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\]}}, |[[ABS_VALUE]]|, {{v\[[0-9]+:[0-9]+\]}}
+; SI: S_ENDPGM
+define void @fabs_fold_f64(double addrspace(1)* %out, double %in0, double %in1) {
+  %fabs = call double @llvm.fabs.f64(double %in0)
+  %fmul = fmul double %fabs, %in1
+  store double %fmul, double addrspace(1)* %out
+  ret void
+}
+
+; SI-LABEL: @fabs_fn_fold_f64
+; SI: S_LOAD_DWORDX2 [[ABS_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-NOT: AND
+; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\]}}, |[[ABS_VALUE]]|, {{v\[[0-9]+:[0-9]+\]}}
+; SI: S_ENDPGM
+define void @fabs_fn_fold_f64(double addrspace(1)* %out, double %in0, double %in1) {
+  %fabs = call double @fabs(double %in0)
+  %fmul = fmul double %fabs, %in1
+  store double %fmul, double addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: @fabs_free_f64
+; SI: V_AND_B32
+; SI: S_ENDPGM
+define void @fabs_free_f64(double addrspace(1)* %out, i64 %in) {
+  %bc = bitcast i64 %in to double
+  %fabs = call double @llvm.fabs.f64(double %bc)
+  store double %fabs, double addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: @fabs_fn_free_f64
+; SI: V_AND_B32
+; SI: S_ENDPGM
+define void @fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
+  %bc = bitcast i64 %in to double
+  %fabs = call double @fabs(double %bc)
+  store double %fabs, double addrspace(1)* %out
+  ret void
+}
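
The FABS64_SI expansion above works because the sign of an IEEE-754 double is bit 63 of its encoding, i.e. bit 31 of the sub1 half of the 64-bit register pair, so a single V_AND_B32 with 0x7fffffff on that half is enough. The following is a minimal host-side sketch of the same trick; it is illustrative only and not part of the patch.

// Illustration: f64 fabs as a single bit-clear on the upper 32-bit word,
// mirroring the V_AND_B32_e32 with 0x7fffffff that FABS64_SI applies to the
// sub1 half of the register pair.
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <initializer_list>

static double FAbsViaHighWordMask(double X) {
  std::uint64_t Bits;
  std::memcpy(&Bits, &X, sizeof(Bits));
  std::uint32_t Lo = static_cast<std::uint32_t>(Bits);        // sub0: passed through.
  std::uint32_t Hi = static_cast<std::uint32_t>(Bits >> 32);  // sub1: holds the sign bit.
  Hi &= 0x7fffffff;                                // Clear bit 63 of the pair.
  Bits = (static_cast<std::uint64_t>(Hi) << 32) | Lo;  // Reassemble (REG_SEQUENCE).
  double Result;
  std::memcpy(&Result, &Bits, sizeof(Result));
  return Result;
}

int main() {
  for (double V : {-0.0, -1.5, 2.25, -1e300})
    assert(FAbsViaHighWordMask(V) == std::fabs(V));
  return 0;
}

When the fabs feeds another VALU instruction, the fabs_fold_f64 and fabs_fn_fold_f64 tests expect no AND at all: the absolute value is folded into V_MUL_F64 as a source modifier (the |[[ABS_VALUE]]| operand), which is the behavior the commit title refers to.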