Mirror of https://github.com/c64scene-ar/llvm-6502.git
Commit c9c70b1651

This currently has a noticeable effect on the kernel argument loads. LDS and global loads are more problematic, I think because of how copies are currently inserted to ensure that the address is a VGPR.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@214942 91177308-0d34-0410-b5e6-96231b3b80d8
35 lines · 890 B · LLVM
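For reference, the IR below implements the standard 64-bit rotate-left idiom, (x << y) | (x >> (64 - y)). A minimal C equivalent (illustrative only, not part of the test) is:

#include <stdint.h>

/* Rotate x left by y bits. This plain form is only well-defined for
   0 < y < 64: at y == 0 the right shift is by 64, which is undefined
   behavior in C and yields poison for the lshr in the IR below. */
static uint64_t rotl64(uint64_t x, uint64_t y) {
    return (x << y) | (x >> (64 - y));
}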
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s

; The value and rotate amount are kernel arguments, so they are uniform
; and the rotate should select to scalar ALU (S_*) instructions.
; FUNC-LABEL: @s_rotl_i64:
; SI: S_SUB_I32
; SI: S_LSHR_B64
; SI: S_LSHL_B64
; SI: S_OR_B64
define void @s_rotl_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
entry:
  %0 = shl i64 %x, %y
  %1 = sub i64 64, %y
  %2 = lshr i64 %x, %1
  %3 = or i64 %0, %2
  store i64 %3, i64 addrspace(1)* %in
  ret void
}
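Since the pattern above is undefined for a rotate amount of zero, a fully defined C rotate typically masks the shift amounts instead; a sketch (the masking trick is standard, not something this test exercises):

#include <stdint.h>

/* Defined for any y: (64 - y) & 63 maps y == 0 to a shift of 0,
   making the rotate a no-op rather than undefined behavior. */
static uint64_t rotl64_any(uint64_t x, unsigned y) {
    y &= 63;
    return (x << y) | (x >> ((64 - y) & 63));
}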

; The operands are loaded from global memory, so they live in VGPRs and
; the rotate should select to vector ALU (V_*) instructions.
; FUNC-LABEL: @v_rotl_i64:
; SI: V_LSHL_B64
; SI: V_SUB_I32
; SI: V_LSHR_B64
; SI: V_OR_B32
; SI: V_OR_B32
define void @v_rotl_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
entry:
  %x = load i64 addrspace(1)* %xptr, align 8
  %y = load i64 addrspace(1)* %yptr, align 8
  %tmp0 = shl i64 %x, %y
  %tmp1 = sub i64 64, %y
  %tmp2 = lshr i64 %x, %tmp1
  %tmp3 = or i64 %tmp0, %tmp2
  store i64 %tmp3, i64 addrspace(1)* %in, align 8
  ret void
}
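To run the checks by hand outside of lit, substitute %s in the RUN line with the test's path; assuming the file is saved as rotl.i64.ll (the name is an assumption here) and llc/FileCheck come from the same build:

llc -march=r600 -mcpu=SI -verify-machineinstrs < rotl.i64.ll | FileCheck -check-prefix=SI -check-prefix=FUNC rotl.i64.ll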