R600/SI: Add global atomicrmw xor
v2: Add separate offset/no-offset tests

Signed-off-by: Aaron Watry <awatry@gmail.com>
Reviewed-by: Matt Arsenault <matthew.arsenault@amd.com>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@220109 91177308-0d34-0410-b5e6-96231b3b80d8
commit 2107be5bc7
parent e81b68b86c
@@ -394,6 +394,7 @@ def atomic_or_global : global_binary_atomic_op<atomic_load_or>;
 def atomic_sub_global : global_binary_atomic_op<atomic_load_sub>;
 def atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
 def atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
+def atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;

 //===----------------------------------------------------------------------===//
 // Misc Pattern Fragments
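For reference, global_binary_atomic_op is defined earlier in the same file and does not appear in this hunk. A minimal sketch of the shape it is assumed to have (inferred from how the other global atomic fragments are used here, not quoted from the patch):

// Sketch only: assumed definition of the helper used above, not part of this diff.
class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
  (ops node:$ptr, node:$value),
  (atomic_op node:$ptr, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]
>;

Under that assumption, atomic_xor_global matches an atomic_load_xor node whose pointer operand is in the global address space, which is what the BUFFER_ATOMIC_XOR pattern added below keys on.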
@@ -922,7 +922,9 @@ defm BUFFER_ATOMIC_AND : MUBUF_Atomic <
 defm BUFFER_ATOMIC_OR : MUBUF_Atomic <
   0x0000003a, "BUFFER_ATOMIC_OR", VReg_32, i32, atomic_or_global
 >;
-//def BUFFER_ATOMIC_XOR : MUBUF_ <0x0000003b, "BUFFER_ATOMIC_XOR", []>;
+defm BUFFER_ATOMIC_XOR : MUBUF_Atomic <
+  0x0000003b, "BUFFER_ATOMIC_XOR", VReg_32, i32, atomic_xor_global
+>;
 //def BUFFER_ATOMIC_INC : MUBUF_ <0x0000003c, "BUFFER_ATOMIC_INC", []>;
 //def BUFFER_ATOMIC_DEC : MUBUF_ <0x0000003d, "BUFFER_ATOMIC_DEC", []>;
 //def BUFFER_ATOMIC_FCMPSWAP : MUBUF_ <0x0000003e, "BUFFER_ATOMIC_FCMPSWAP", []>;
@@ -639,3 +639,83 @@ entry:
   store i32 %0, i32 addrspace(1)* %out2
   ret void
 }
+
+; FUNC-LABEL: {{^}}atomic_xor_i32_offset:
+; SI: BUFFER_ATOMIC_XOR v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+define void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xor_i32_ret_offset:
+; SI: BUFFER_ATOMIC_XOR [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: BUFFER_STORE_DWORD [[RET]]
+define void @atomic_xor_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+entry:
+  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %0, i32 addrspace(1)* %out2
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xor_i32_addr64_offset:
+; SI: BUFFER_ATOMIC_XOR v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+define void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xor_i32_ret_addr64_offset:
+; SI: BUFFER_ATOMIC_XOR [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: BUFFER_STORE_DWORD [[RET]]
+define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+entry:
+  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %0, i32 addrspace(1)* %out2
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xor_i32:
+; SI: BUFFER_ATOMIC_XOR v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
+define void @atomic_xor_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+  %0 = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xor_i32_ret:
+; SI: BUFFER_ATOMIC_XOR [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
+; SI: BUFFER_STORE_DWORD [[RET]]
+define void @atomic_xor_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+entry:
+  %0 = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %0, i32 addrspace(1)* %out2
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xor_i32_addr64:
+; SI: BUFFER_ATOMIC_XOR v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
+define void @atomic_xor_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_xor_i32_ret_addr64:
+; SI: BUFFER_ATOMIC_XOR [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
+; SI: BUFFER_STORE_DWORD [[RET]]
+define void @atomic_xor_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+entry:
+  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %0, i32 addrspace(1)* %out2
+  ret void
+}
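The FUNC and SI check prefixes used by these tests are configured by the RUN lines at the top of the test file, which fall outside this hunk. A minimal sketch of what such a RUN line is assumed to look like for these SI checks (flag spelling is an assumption, not taken from the patch):

; Assumed RUN line shape, not part of this diff:
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=FUNC %s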