commit 7c9c6ed761

Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few
more test case improvements/changes were required this time around:
(r229269-r229278)

    import fileinput
    import sys
    import re
    pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")
    for line in sys.stdin:
        sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@230794 91177308-0d34-0410-b5e6-96231b3b80d8
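As a quick illustration (not part of the original commit message), here is the
same regex applied to a single line of the old syntax. This is a minimal
sketch assuming a stock Python interpreter; the variable names `old` and `new`
are invented for the demo:

    import re

    # Migration pattern, copied verbatim from the commit message above.
    pat = re.compile(
        r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))"
        r"(| addrspace\(\d+\) *)\*"
        r"($| *(?:%|@|null|undef|blockaddress|getelementptr|"
        r"addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

    # A load in the old single-operand syntax, as this test used to spell it.
    old = "  %x = load i64 addrspace(1)* %xptr, align 8"
    new = re.sub(pat, r"\1, \2\3*\4", old)
    print(new)  # "  %x = load i64, i64 addrspace(1)* %xptr, align 8"

The replacement works because group 1 ends with the pointee type (group 2), so
`\1, \2\3*` re-emits that type once as the new explicit result type and again
as part of the pointer operand's type, preserving any addrspace qualifier
captured in group 3.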
; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=BOTH %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=BOTH %s
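
; The (x >> y) | (x << (64 - y)) pattern below is a rotate-right of a
; 64-bit scalar; with SGPR operands it should select to s_sub_i32 plus
; the two 64-bit shifts, combined with s_or_b64.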
; BOTH-LABEL: {{^}}s_rotr_i64:
; BOTH-DAG: s_sub_i32
; BOTH-DAG: s_lshr_b64
; BOTH-DAG: s_lshl_b64
; BOTH: s_or_b64
define void @s_rotr_i64(i64 addrspace(1)* %in, i64 %x, i64 %y) {
entry:
%tmp0 = sub i64 64, %y
%tmp1 = shl i64 %x, %tmp0
%tmp2 = lshr i64 %x, %y
%tmp3 = or i64 %tmp1, %tmp2
store i64 %tmp3, i64 addrspace(1)* %in
ret void
}
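
; Same rotate pattern with operands loaded into VGPRs. SI selects
; v_lshr_b64/v_lshl_b64, while VI selects the reversed-operand forms
; v_lshrrev_b64/v_lshlrev_b64; the 64-bit OR is done as two v_or_b32s.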
; BOTH-LABEL: {{^}}v_rotr_i64:
; BOTH-DAG: v_sub_i32
; SI-DAG: v_lshr_b64
; SI-DAG: v_lshl_b64
; VI-DAG: v_lshrrev_b64
; VI-DAG: v_lshlrev_b64
; BOTH: v_or_b32
; BOTH: v_or_b32
define void @v_rotr_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
entry:
%x = load i64, i64 addrspace(1)* %xptr, align 8
%y = load i64, i64 addrspace(1)* %yptr, align 8
%tmp0 = sub i64 64, %y
%tmp1 = shl i64 %x, %tmp0
%tmp2 = lshr i64 %x, %y
%tmp3 = or i64 %tmp1, %tmp2
store i64 %tmp3, i64 addrspace(1)* %in
ret void
}
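
; <2 x i64> version of the scalar rotate; only the label is checked.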
; BOTH-LABEL: {{^}}s_rotr_v2i64:
define void @s_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> %x, <2 x i64> %y) {
entry:
%tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
%tmp1 = shl <2 x i64> %x, %tmp0
%tmp2 = lshr <2 x i64> %x, %y
%tmp3 = or <2 x i64> %tmp1, %tmp2
store <2 x i64> %tmp3, <2 x i64> addrspace(1)* %in
ret void
}
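
; <2 x i64> version of the VGPR rotate; only the label is checked.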
; BOTH-LABEL: {{^}}v_rotr_v2i64:
define void @v_rotr_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> addrspace(1)* %xptr, <2 x i64> addrspace(1)* %yptr) {
entry:
%x = load <2 x i64>, <2 x i64> addrspace(1)* %xptr, align 8
%y = load <2 x i64>, <2 x i64> addrspace(1)* %yptr, align 8
%tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
%tmp1 = shl <2 x i64> %x, %tmp0
%tmp2 = lshr <2 x i64> %x, %y
%tmp3 = or <2 x i64> %tmp1, %tmp2
store <2 x i64> %tmp3, <2 x i64> addrspace(1)* %in
ret void
}