llvm-6502/test/CodeGen/R600/split-scalar-i64-add.ll
Commit 1f996fa36b by Tom Stellard: R600/SI: Add a stub GCNTargetMachine
This is equivalent to the AMDGPUTargetMachine now, but it is the
starting point for separating R600 and GCN functionality into separate
targets.

It is recommended that users start using the gcn triple for GCN-based
GPUs, because using the r600 triple for these GPUs will be deprecated in
the future.
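
For example, a GCN-targeted llc invocation (this simply mirrors the RUN line in
the test below; the input file name here is only illustrative):

  llc -march=amdgcn -mcpu=SI -verify-machineinstrs split-scalar-i64-add.ll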

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@225277 91177308-0d34-0410-b5e6-96231b3b80d8

; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s

declare i32 @llvm.r600.read.tidig.x() readnone

; This is broken because the low half of the 64-bit add remains on the
; SALU, but the upper half does not. The addc expects the carry bit
; set in vcc, which is undefined since the low scalar half add sets
; scc instead.
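;
; A rough sketch of the two consistent lowerings, for illustration only (these
; lines are plain comments, not FileCheck checks):
;   all-scalar: s_add_u32 writes its carry-out to scc, and s_addc_u32 reads scc.
;   all-vector: v_add_i32 writes its carry-out to vcc, and v_addc_u32 reads vcc.
; Mixing the halves, as happens here, leaves v_addc_u32 reading a vcc that the
; scalar low-half add never defined.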
; FUNC-LABEL: {{^}}imp_def_vcc_split_i64_add_0:
; SI: v_add_i32
; SI: v_addc_u32
define void @imp_def_vcc_split_i64_add_0(i64 addrspace(1)* %out, i32 %val) {
%vec.0 = insertelement <2 x i32> undef, i32 %val, i32 0
%vec.1 = insertelement <2 x i32> %vec.0, i32 999999, i32 1
%bc = bitcast <2 x i32> %vec.1 to i64
%add = add i64 %bc, 399
store i64 %add, i64 addrspace(1)* %out, align 8
ret void
}

; FUNC-LABEL: {{^}}imp_def_vcc_split_i64_add_1:
; SI: v_add_i32
; SI: v_addc_u32
define void @imp_def_vcc_split_i64_add_1(i64 addrspace(1)* %out, i32 %val0, i64 %val1) {
%vec.0 = insertelement <2 x i32> undef, i32 %val0, i32 0
%vec.1 = insertelement <2 x i32> %vec.0, i32 99999, i32 1
%bc = bitcast <2 x i32> %vec.1 to i64
%add = add i64 %bc, %val1
store i64 %add, i64 addrspace(1)* %out, align 8
ret void
}

; Doesn't use constants.
; FUNC-LABEL: {{^}}imp_def_vcc_split_i64_add_2:
; SI: v_add_i32
; SI: v_addc_u32
define void @imp_def_vcc_split_i64_add_2(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %val0, i64 %val1) {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
%gep = getelementptr i32 addrspace(1)* %in, i32 %tid
%load = load i32 addrspace(1)* %gep
%vec.0 = insertelement <2 x i32> undef, i32 %val0, i32 0
%vec.1 = insertelement <2 x i32> %vec.0, i32 %load, i32 1
%bc = bitcast <2 x i32> %vec.1 to i64
%add = add i64 %bc, %val1
store i64 %add, i64 addrspace(1)* %out, align 8
ret void
}