6824f127f9
System z branches have a mask to select which of the 4 CC values should cause the branch to be taken. We can invert a branch by inverting the mask. However, not all instructions can produce all 4 CC values, so inverting the branch like this can lead to some oddities. For example, integer comparisons only produce a CC of 0 (equal), 1 (less) or 2 (greater). If an integer EQ is reversed to NE before instruction selection, the branch will test for 1 or 2. If instead the branch is reversed after instruction selection (by inverting the mask), it will test for 1, 2 or 3. Both are correct, but the second isn't really canonical.

This patch therefore keeps track of which CC values are possible and uses this when inverting a mask. Although this is mostly cosmetic, it fixes undefined behavior for the CIJNLH in branch-08.ll. Another fix would have been to mask out bit 0 when generating the fused compare and branch, but the point of this patch is that we shouldn't need to do that in the first place. The patch also makes it easier to reuse CC results from other instructions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@187495 91177308-0d34-0410-b5e6-96231b3b80d8
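To make the mask arithmetic concrete, here is a worked example, assuming the usual 4-bit branch mask in which the bit values 8, 4, 2 and 1 select CC 0, 1, 2 and 3 respectively:

    integer EQ branch:            mask 8           (CC 0)
    NE chosen before selection:   mask 6           (CC 1 or 2, the canonical form)
    EQ inverted after selection:  mask 15 - 8 = 7  (CC 1, 2 or 3, even though a
                                                    compare never produces CC 3)
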
; Test 32-bit atomic exchange.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

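; The exchange is expanded into a load followed by a compare-and-swap (CS)
; loop that retries until the CS succeeds.
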
; Check register exchange.
define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
; CHECK-LABEL: f1:
; CHECK: l %r2, 0(%r3)
; CHECK: [[LABEL:\.[^:]*]]:
; CHECK: cs %r2, %r4, 0(%r3)
; CHECK: jl [[LABEL]]
; CHECK: br %r14
  %res = atomicrmw xchg i32 *%src, i32 %b seq_cst
  ret i32 %res
}

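; The following functions probe the addressing limits: CS takes a 12-bit
; unsigned displacement (0 to 4095, so 4092 is the last aligned word),
; while CSY takes a signed 20-bit displacement (-524288 to 524287).
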
; Check the high end of the aligned CS range.
define i32 @f2(i32 %dummy, i32 *%src, i32 %b) {
; CHECK-LABEL: f2:
; CHECK: l %r2, 4092(%r3)
; CHECK: cs %r2, {{%r[0-9]+}}, 4092(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 1023
  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
  ret i32 %res
}

; Check the next word up, which requires CSY.
define i32 @f3(i32 %dummy, i32 *%src, i32 %b) {
; CHECK-LABEL: f3:
; CHECK: ly %r2, 4096(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, 4096(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 1024
  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
  ret i32 %res
}

; Check the high end of the aligned CSY range.
define i32 @f4(i32 %dummy, i32 *%src, i32 %b) {
; CHECK-LABEL: f4:
; CHECK: ly %r2, 524284(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, 524284(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 131071
  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
  ret i32 %res
}

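; An offset of 131072 words (524288 bytes) no longer fits the signed 20-bit
; CSY displacement, so the base must first be adjusted, here with AGFI.
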
; Check the next word up, which needs separate address logic.
define i32 @f5(i32 %dummy, i32 *%src, i32 %b) {
; CHECK-LABEL: f5:
; CHECK: agfi %r3, 524288
; CHECK: l %r2, 0(%r3)
; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 131072
  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
  ret i32 %res
}

; Check the high end of the negative aligned CSY range.
define i32 @f6(i32 %dummy, i32 *%src, i32 %b) {
; CHECK-LABEL: f6:
; CHECK: ly %r2, -4(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, -4(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 -1
  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
  ret i32 %res
}

; Check the low end of the CSY range.
define i32 @f7(i32 %dummy, i32 *%src, i32 %b) {
; CHECK-LABEL: f7:
; CHECK: ly %r2, -524288(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, -524288(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 -131072
  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
  ret i32 %res
}

; Check the next word down, which needs separate address logic.
define i32 @f8(i32 %dummy, i32 *%src, i32 %b) {
; CHECK-LABEL: f8:
; CHECK: agfi %r3, -524292
; CHECK: l %r2, 0(%r3)
; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 -131073
  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
  ret i32 %res
}

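; CS and CSY only take base-plus-displacement addresses, so a base + index
; address has to be added into a single register first.
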
; Check that indexed addresses are not allowed.
define i32 @f9(i32 %dummy, i64 %base, i64 %index, i32 %b) {
; CHECK-LABEL: f9:
; CHECK: agr %r3, %r4
; CHECK: l %r2, 0(%r3)
; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
; CHECK: br %r14
  %add = add i64 %base, %index
  %ptr = inttoptr i64 %add to i32 *
  %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
  ret i32 %res
}

; Check exchange of a constant.  We should force it into a register and
; use the sequence above.
define i32 @f10(i32 %dummy, i32 *%src) {
; CHECK-LABEL: f10:
; CHECK: llill [[VALUE:%r[0-9+]]], 40000
; CHECK: l %r2, 0(%r3)
; CHECK: [[LABEL:\.[^:]*]]:
; CHECK: cs %r2, [[VALUE]], 0(%r3)
; CHECK: jl [[LABEL]]
; CHECK: br %r14
  %res = atomicrmw xchg i32 *%src, i32 40000 seq_cst
  ret i32 %res
}