R600: Expand vector float operations for both SI and R600

Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188596 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Tom Stellard
Date:   2013-08-16 23:51:24 +00:00
Parent: 62c7749437
Commit: 0991c314d7

6 changed files with 128 additions and 88 deletions
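For context on what this change does: the FADD/FDIV/FMUL/FSUB actions for v2f32 and v4f32 move out of R600TargetLowering and into the shared AMDGPUTargetLowering constructor, so both R600 and SI now mark these vector float operations as Expand. Expand tells the SelectionDAG legalizer to unroll each vector operation into per-element scalar operations, which is why the updated tests expect one ADD/MUL_IEEE (R600) or V_ADD_F32/V_MUL_F32 (SI) per element. A minimal standalone C++ sketch of that unrolling, for illustration only (Vec4f and scalarized_fadd are hypothetical names, not LLVM APIs):

    #include <array>
    #include <cstddef>
    #include <cstdio>

    // Stand-in for a <4 x float> SelectionDAG value.
    using Vec4f = std::array<float, 4>;

    // What "Expand" amounts to for ISD::FADD on v4f32: the legalizer unrolls
    // the vector add into four independent scalar adds, one per element.
    static Vec4f scalarized_fadd(const Vec4f &a, const Vec4f &b) {
      Vec4f r{};
      for (std::size_t i = 0; i < r.size(); ++i)
        r[i] = a[i] + b[i]; // each lane becomes its own scalar FADD
      return r;
    }

    int main() {
      const Vec4f a{1.0f, 2.0f, 3.0f, 4.0f};
      const Vec4f b{0.5f, 0.5f, 0.5f, 0.5f};
      const Vec4f r = scalarized_fadd(a, b);
      std::printf("%g %g %g %g\n", r[0], r[1], r[2], r[3]);
      return 0;
    }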

@@ -115,14 +115,14 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
   setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
   setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);
-  static const int types[] = {
+  static const int IntTypes[] = {
     (int)MVT::v2i32,
     (int)MVT::v4i32
   };
-  const size_t NumTypes = array_lengthof(types);
+  const size_t NumIntTypes = array_lengthof(IntTypes);
-  for (unsigned int x = 0; x < NumTypes; ++x) {
-    MVT::SimpleValueType VT = (MVT::SimpleValueType)types[x];
+  for (unsigned int x = 0; x < NumIntTypes; ++x) {
+    MVT::SimpleValueType VT = (MVT::SimpleValueType)IntTypes[x];
     //Expand the following operations for the current type by default
     setOperationAction(ISD::ADD, VT, Expand);
     setOperationAction(ISD::AND, VT, Expand);
@@ -141,6 +141,20 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
     setOperationAction(ISD::VSELECT, VT, Expand);
     setOperationAction(ISD::XOR, VT, Expand);
   }
+  static const int FloatTypes[] = {
+    (int)MVT::v2f32,
+    (int)MVT::v4f32
+  };
+  const size_t NumFloatTypes = array_lengthof(FloatTypes);
+  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
+    MVT::SimpleValueType VT = (MVT::SimpleValueType)FloatTypes[x];
+    setOperationAction(ISD::FADD, VT, Expand);
+    setOperationAction(ISD::FDIV, VT, Expand);
+    setOperationAction(ISD::FMUL, VT, Expand);
+    setOperationAction(ISD::FSUB, VT, Expand);
+  }
 }
 //===----------------------------------------------------------------------===//

@@ -38,15 +38,6 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
   computeRegisterProperties();
-  setOperationAction(ISD::FADD, MVT::v4f32, Expand);
-  setOperationAction(ISD::FADD, MVT::v2f32, Expand);
-  setOperationAction(ISD::FMUL, MVT::v4f32, Expand);
-  setOperationAction(ISD::FMUL, MVT::v2f32, Expand);
-  setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
-  setOperationAction(ISD::FDIV, MVT::v2f32, Expand);
-  setOperationAction(ISD::FSUB, MVT::v4f32, Expand);
-  setOperationAction(ISD::FSUB, MVT::v2f32, Expand);
   setOperationAction(ISD::FCOS, MVT::f32, Custom);
   setOperationAction(ISD::FSIN, MVT::f32, Custom);

@@ -1,23 +1,23 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
-; CHECK: @fadd_f32
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @fadd_f32() {
-  %r0 = call float @llvm.R600.load.input(i32 0)
-  %r1 = call float @llvm.R600.load.input(i32 1)
-  %r2 = fadd float %r0, %r1
-  call void @llvm.AMDGPU.store.output(float %r2, i32 0)
+; R600-CHECK: @fadd_f32
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
+; SI-CHECK: @fadd_f32
+; SI-CHECK: V_ADD_F32
+define void @fadd_f32(float addrspace(1)* %out, float %a, float %b) {
+entry:
+  %0 = fadd float %a, %b
+  store float %0, float addrspace(1)* %out
   ret void
 }
-declare float @llvm.R600.load.input(i32) readnone
-declare void @llvm.AMDGPU.store.output(float, i32)
-; CHECK: @fadd_v2f32
-; CHECK-DAG: ADD * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
-; CHECK-DAG: ADD * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
+; R600-CHECK: @fadd_v2f32
+; R600-CHECK-DAG: ADD * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
+; R600-CHECK-DAG: ADD * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
+; SI-CHECK: @fadd_v2f32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
 define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fadd <2 x float> %a, %b
@@ -25,12 +25,16 @@ entry:
   ret void
 }
-; CHECK: @fadd_v4f32
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: @fadd_v4f32
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; SI-CHECK: @fadd_v4f32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
 define void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
   %a = load <4 x float> addrspace(1) * %in

@@ -1,14 +1,20 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
 ; These tests check that fdiv is expanded correctly and also test that the
 ; scheduler is scheduling the RECIP_IEEE and MUL_IEEE instructions in separate
 ; instruction groups.
-; CHECK: @fdiv_v2f32
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Z
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Y
-; CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW]}}, KC0[3].X, PS
-; CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[2].W, PS
+; R600-CHECK: @fdiv_v2f32
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Z
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Y
+; R600-CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW]}}, KC0[3].X, PS
+; R600-CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[2].W, PS
+; SI-CHECK: @fdiv_v2f32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
 define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fdiv <2 x float> %a, %b
@@ -16,16 +22,24 @@ entry:
   ret void
 }
-; CHECK: @fdiv_v4f32
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
-; CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
-; CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
-; CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; R600-CHECK: @fdiv_v4f32
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; R600-CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; R600-CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; R600-CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; SI-CHECK: @fdiv_v4f32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
 define void @fdiv_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
   %a = load <4 x float> addrspace(1) * %in
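Tying the fdiv checks above back to the lowering: with FDIV marked Expand, each element of the vector divide is legalized to a reciprocal followed by a multiply, which is exactly the RECIP_IEEE/MUL_IEEE pairing the R600 checks expect and the V_RCP_F32/V_MUL_F32 pairing on SI. A plain C++ sketch of that per-element expansion (fdiv_v2f32_expanded is a hypothetical name, and a real hardware reciprocal has its own precision characteristics):

    #include <cstdio>

    // Per-element expansion of a <2 x float> fdiv as reciprocal + multiply:
    // RECIP_IEEE then MUL_IEEE on R600, V_RCP_F32 then V_MUL_F32 on SI.
    static void fdiv_v2f32_expanded(const float a[2], const float b[2], float out[2]) {
      for (int i = 0; i < 2; ++i) {
        const float rcp = 1.0f / b[i]; // RECIP_IEEE / V_RCP_F32
        out[i] = a[i] * rcp;           // MUL_IEEE / V_MUL_F32
      }
    }

    int main() {
      const float a[2] = {6.0f, 9.0f};
      const float b[2] = {2.0f, 3.0f};
      float r[2];
      fdiv_v2f32_expanded(a, b, r);
      std::printf("%g %g\n", r[0], r[1]);
      return 0;
    }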

@@ -1,23 +1,27 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
-; CHECK: @fmul_f32
-; CHECK: MUL_IEEE * {{T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @fmul_f32() {
-  %r0 = call float @llvm.R600.load.input(i32 0)
-  %r1 = call float @llvm.R600.load.input(i32 1)
-  %r2 = fmul float %r0, %r1
-  call void @llvm.AMDGPU.store.output(float %r2, i32 0)
-  ret void
+; R600-CHECK: @fmul_f32
+; R600-CHECK: MUL_IEEE * {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
+; SI-CHECK: @fmul_f32
+; SI-CHECK: V_MUL_F32
+define void @fmul_f32(float addrspace(1)* %out, float %a, float %b) {
+entry:
+  %0 = fmul float %a, %b
+  store float %0, float addrspace(1)* %out
+  ret void
 }
-declare float @llvm.R600.load.input(i32) readnone
-declare void @llvm.AMDGPU.store.output(float, i32)
-; CHECK: @fmul_v2f32
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
+; R600-CHECK: @fmul_v2f32
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
+; SI-CHECK: @fmul_v2f32
+; SI-CHECK: V_MUL_F32
+; SI-CHECK: V_MUL_F32
 define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fmul <2 x float> %a, %b
@@ -25,12 +29,16 @@ entry:
   ret void
 }
-; CHECK: @fmul_v4f32
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: @fmul_v4f32
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; SI-CHECK: @fmul_v4f32
+; SI-CHECK: V_MUL_F32
+; SI-CHECK: V_MUL_F32
+; SI-CHECK: V_MUL_F32
+; SI-CHECK: V_MUL_F32
 define void @fmul_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
   %a = load <4 x float> addrspace(1) * %in

@@ -1,23 +1,27 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
-; CHECK: @fsub_f32
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-define void @fsub_f32() {
-  %r0 = call float @llvm.R600.load.input(i32 0)
-  %r1 = call float @llvm.R600.load.input(i32 1)
-  %r2 = fsub float %r0, %r1
-  call void @llvm.AMDGPU.store.output(float %r2, i32 0)
-  ret void
+; R600-CHECK: @fsub_f32
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, -KC0[2].W
+; SI-CHECK: @fsub_f32
+; SI-CHECK: V_SUB_F32
+define void @fsub_f32(float addrspace(1)* %out, float %a, float %b) {
+entry:
+  %0 = fsub float %a, %b
+  store float %0, float addrspace(1)* %out
+  ret void
 }
-declare float @llvm.R600.load.input(i32) readnone
-declare void @llvm.AMDGPU.store.output(float, i32)
-; CHECK: @fsub_v2f32
-; CHECK-DAG: ADD * T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
-; CHECK-DAG: ADD * T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y
+; R600-CHECK: @fsub_v2f32
+; R600-CHECK-DAG: ADD * T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
+; R600-CHECK-DAG: ADD * T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y
+; SI-CHECK: @fsub_v2f32
+; SI-CHECK: V_SUB_F32
+; SI-CHECK: V_SUB_F32
 define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fsub <2 x float> %a, %b
@@ -25,11 +29,16 @@ entry:
   ret void
 }
-; CHECK: @fsub_v4f32
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; R600-CHECK: @fsub_v4f32
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; SI-CHECK: @fsub_v4f32
+; SI-CHECK: V_SUB_F32
+; SI-CHECK: V_SUB_F32
+; SI-CHECK: V_SUB_F32
+; SI-CHECK: V_SUB_F32
 define void @fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
   %a = load <4 x float> addrspace(1) * %in
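One last note on the fsub checks: after the scalarizing expansion, R600 selects each subtract as an ADD with a negated second source (the -T.../-KC0[...] operands in the check lines above), while SI uses a dedicated V_SUB_F32. A trivial C++ stand-in for the per-element operation (neg_add_f32 is a hypothetical name, not an LLVM API):

    #include <cstdio>

    // fsub a, b selected as an add with a negated source modifier, matching
    // the "ADD ..., -..." patterns the R600 checks expect.
    static float neg_add_f32(float a, float b) {
      return a + (-b); // ADD dst, a, -b on R600; V_SUB_F32 on SI
    }

    int main() {
      std::printf("%g\n", neg_add_f32(5.0f, 2.0f));
      return 0;
    }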