mirror of https://github.com/c64scene-ar/llvm-6502.git
R600/SI: Fix selection error on i64 rotl / rotr.
Evergreen is still broken due to missing shl_parts.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@210885 91177308-0d34-0410-b5e6-96231b3b80d8
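Marking the i64 rotates as Expand lets the generic legalizer rewrite them as shifts plus an OR instead of failing to select a native instruction; the SI checks below show the resulting 64-bit shifts being selected directly (S_LSHL_B64 / S_LSHR_B64), while Evergreen would still need SHL_PARTS to split those shifts into 32-bit pieces. A minimal C++ sketch of the expanded form, assuming a rotate amount strictly between 0 and 64 as in the tests below (helper name is mine; this is an illustration, not the in-tree expansion code):

#include <cstdint>
#include <cstdio>

// Sketch of the shift/or form an expanded 64-bit rotate-left takes.
static uint64_t rotl64_expanded(uint64_t x, uint64_t y) {
  uint64_t hi = x << y;          // shl i64 %x, %y
  uint64_t lo = x >> (64 - y);   // lshr i64 %x, (64 - %y)
  return hi | lo;                // or i64
}

int main() {
  std::printf("%llx\n", (unsigned long long)rotl64_expanded(0x0123456789abcdefULL, 8));
  return 0;
}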
This commit is contained in:
parent 250305156a
commit d344c6bcf9
lib/Target/R600/AMDGPUISelLowering.cpp
@@ -126,8 +126,10 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
 setOperationAction(ISD::FROUND, MVT::f32, Legal);
 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

-// The hardware supports ROTR, but not ROTL
+// The hardware supports 32-bit ROTR, but not ROTL.
 setOperationAction(ISD::ROTL, MVT::i32, Expand);
+setOperationAction(ISD::ROTL, MVT::i64, Expand);
+setOperationAction(ISD::ROTR, MVT::i64, Expand);

 // Lower floating point store/load to integer store/load to reduce the number
 // of patterns in tablegen.
@@ -264,11 +266,13 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
 setOperationAction(ISD::MUL, VT, Expand);
 setOperationAction(ISD::OR, VT, Expand);
 setOperationAction(ISD::SHL, VT, Expand);
-setOperationAction(ISD::SINT_TO_FP, VT, Expand);
-setOperationAction(ISD::SRL, VT, Expand);
 setOperationAction(ISD::SRA, VT, Expand);
+setOperationAction(ISD::SRL, VT, Expand);
+setOperationAction(ISD::ROTL, VT, Expand);
+setOperationAction(ISD::ROTR, VT, Expand);
 setOperationAction(ISD::SUB, VT, Expand);
 setOperationAction(ISD::UDIV, VT, Expand);
+setOperationAction(ISD::SINT_TO_FP, VT, Expand);
 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
 setOperationAction(ISD::UREM, VT, Expand);
 setOperationAction(ISD::SELECT, VT, Expand);
lib/Target/R600/R600ISelLowering.h
@@ -52,9 +52,6 @@ private:
 MachineRegisterInfo & MRI, unsigned dword_offset) const;
 SDValue OptimizeSwizzle(SDValue BuildVector, SDValue Swz[], SelectionDAG &DAG) const;

-/// \brief Lower ROTL opcode to BITALIGN
-SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
-
 SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
 SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
 SDValue LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const;
test/CodeGen/R600/rotl.i64.ll (new file, 34 lines)
@@ -0,0 +1,34 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @s_rotl_i64:
+; SI: S_LSHL_B64
+; SI: S_SUB_I32
+; SI: S_LSHR_B64
+; SI: S_OR_B64
+define void @s_rotl_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
+entry:
+%0 = shl i64 %x, %y
+%1 = sub i64 64, %y
+%2 = lshr i64 %x, %1
+%3 = or i64 %0, %2
+store i64 %3, i64 addrspace(1)* %in
+ret void
+}
+
+; FUNC-LABEL: @v_rotl_i64:
+; SI: V_LSHL_B64
+; SI: V_SUB_I32
+; SI: V_LSHR_B64
+; SI: V_OR_B32
+; SI: V_OR_B32
+define void @v_rotl_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
+entry:
+%x = load i64 addrspace(1)* %xptr, align 8
+%y = load i64 addrspace(1)* %yptr, align 8
+%tmp0 = shl i64 %x, %y
+%tmp1 = sub i64 64, %y
+%tmp2 = lshr i64 %x, %tmp1
+%tmp3 = or i64 %tmp0, %tmp2
+store i64 %tmp3, i64 addrspace(1)* %in, align 8
+ret void
+}
test/CodeGen/R600/rotl.ll (new file, 54 lines)
@@ -0,0 +1,54 @@
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @rotl_i32:
+; R600: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
+; R600-NEXT: 32
+; R600: BIT_ALIGN_INT {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].Z, PV.{{[XYZW]}}
+
+; SI: S_SUB_I32 [[SDST:s[0-9]+]], 32, {{[s][0-9]+}}
+; SI: V_MOV_B32_e32 [[VDST:v[0-9]+]], [[SDST]]
+; SI: V_ALIGNBIT_B32 {{v[0-9]+, [s][0-9]+, v[0-9]+}}, [[VDST]]
+define void @rotl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+entry:
+%0 = shl i32 %x, %y
+%1 = sub i32 32, %y
+%2 = lshr i32 %x, %1
+%3 = or i32 %0, %2
+store i32 %3, i32 addrspace(1)* %in
+ret void
+}
+
+; FUNC-LABEL: @rotl_v2i32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+define void @rotl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
+entry:
+%0 = shl <2 x i32> %x, %y
+%1 = sub <2 x i32> <i32 32, i32 32>, %y
+%2 = lshr <2 x i32> %x, %1
+%3 = or <2 x i32> %0, %2
+store <2 x i32> %3, <2 x i32> addrspace(1)* %in
+ret void
+}
+
+; FUNC-LABEL: @rotl_v4i32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+define void @rotl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
+entry:
+%0 = shl <4 x i32> %x, %y
+%1 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
+%2 = lshr <4 x i32> %x, %1
+%3 = or <4 x i32> %0, %2
+store <4 x i32> %3, <4 x i32> addrspace(1)* %in
+ret void
+}
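The 32-bit CHECK lines above show how the expanded rotate is matched back onto hardware: R600's BIT_ALIGN_INT and SI's V_ALIGNBIT_B32 both behave like a funnel shift right, so a left rotate by y is emitted as an alignbit of (x, x) by 32 - y (the S_SUB_I32 followed by V_ALIGNBIT_B32 in the SI checks). A rough C++ model of that, with helper names of my own and the operand semantics paraphrased from the CHECK lines rather than the ISA documentation:

#include <cstdint>
#include <cstdio>

// Rough model of V_ALIGNBIT_B32 / BIT_ALIGN_INT: concatenate two 32-bit
// values, shift the 64-bit result right, and keep the low 32 bits.
static uint32_t alignbit(uint32_t hi, uint32_t lo, uint32_t shift) {
  uint64_t concat = ((uint64_t)hi << 32) | lo;
  return (uint32_t)(concat >> (shift & 31));
}

// rotl_i32 as the CHECK lines describe it: compute 32 - y, then
// alignbit(x, x, 32 - y) produces the left rotate.
static uint32_t rotl32_via_alignbit(uint32_t x, uint32_t y) {
  return alignbit(x, x, 32u - y);
}

int main() {
  // rotl(0x80000001, 1) == 0x00000003
  std::printf("%08x\n", rotl32_via_alignbit(0x80000001u, 1));
  return 0;
}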
test/CodeGen/R600/rotr.ll
@@ -1,37 +1,52 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK %s
-; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI-CHECK %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s

-; R600-CHECK-LABEL: @rotr:
-; R600-CHECK: BIT_ALIGN_INT
+; FUNC-LABEL: @rotr_i32:
+; R600: BIT_ALIGN_INT

-; SI-CHECK-LABEL: @rotr:
-; SI-CHECK: V_ALIGNBIT_B32
-define void @rotr(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+; SI: V_ALIGNBIT_B32
+define void @rotr_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
 entry:
-%0 = sub i32 32, %y
-%1 = shl i32 %x, %0
-%2 = lshr i32 %x, %y
-%3 = or i32 %1, %2
-store i32 %3, i32 addrspace(1)* %in
+%tmp0 = sub i32 32, %y
+%tmp1 = shl i32 %x, %tmp0
+%tmp2 = lshr i32 %x, %y
+%tmp3 = or i32 %tmp1, %tmp2
+store i32 %tmp3, i32 addrspace(1)* %in
 ret void
 }

-; R600-CHECK-LABEL: @rotl:
-; R600-CHECK: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
-; R600-CHECK-NEXT: 32
-; R600-CHECK: BIT_ALIGN_INT {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].Z, PV.{{[XYZW]}}
+; FUNC-LABEL: @rotr_v2i32:
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT

-; SI-CHECK-LABEL: @rotl:
-; SI-CHECK: S_SUB_I32 [[SDST:s[0-9]+]], 32, {{[s][0-9]+}}
-; SI-CHECK: V_MOV_B32_e32 [[VDST:v[0-9]+]], [[SDST]]
-; SI-CHECK: V_ALIGNBIT_B32 {{v[0-9]+, [s][0-9]+, v[0-9]+}}, [[VDST]]
-define void @rotl(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+define void @rotr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
 entry:
-%0 = shl i32 %x, %y
-%1 = sub i32 32, %y
-%2 = lshr i32 %x, %1
-%3 = or i32 %0, %2
-store i32 %3, i32 addrspace(1)* %in
+%tmp0 = sub <2 x i32> <i32 32, i32 32>, %y
+%tmp1 = shl <2 x i32> %x, %tmp0
+%tmp2 = lshr <2 x i32> %x, %y
+%tmp3 = or <2 x i32> %tmp1, %tmp2
+store <2 x i32> %tmp3, <2 x i32> addrspace(1)* %in
 ret void
 }

+; FUNC-LABEL: @rotr_v4i32:
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT
+; R600: BIT_ALIGN_INT
+
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+; SI: V_ALIGNBIT_B32
+define void @rotr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
+entry:
+%tmp0 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
+%tmp1 = shl <4 x i32> %x, %tmp0
+%tmp2 = lshr <4 x i32> %x, %y
+%tmp3 = or <4 x i32> %tmp1, %tmp2
+store <4 x i32> %tmp3, <4 x i32> addrspace(1)* %in
+ret void
+}