; Test 64-bit atomic XORs.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
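;
; z10 predates the interlocked-access facility, so the atomic XOR is
; expected to be expanded into a compare-and-swap (CSG) loop rather
; than a single interlocked instruction.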

; Check XORs of a variable.
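; The expansion should be a loop of roughly the following shape
; (a sketch reconstructed from the CHECK patterns below; the exact
; registers are the allocator's choice):
;
;         lg    %r2, 0(%r3)          load the original value
; loop:   lgr   %r0, %r2             copy it into a scratch register
;         xgr   %r0, %r4             apply the XOR
;         csg   %r2, %r0, 0(%r3)     try to store the new value
;         jl    loop                 CC 1 means the store failed; retry
;         br    %r14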
define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
; CHECK-LABEL: f1:
; CHECK: lg %r2, 0(%r3)
; CHECK: [[LABEL:\.[^ ]*]]:
; CHECK: lgr %r0, %r2
; CHECK: xgr %r0, %r4
; CHECK: csg %r2, %r0, 0(%r3)
; CHECK: jl [[LABEL]]
; CHECK: br %r14
  %res = atomicrmw xor i64 *%src, i64 %b seq_cst
  ret i64 %res
}

; Check the lowest useful XILF value.
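; (XILF XORs the low 32 bits of a register with a 32-bit immediate,
; so an immediate of 0 would be a no-op and 1 is the lowest useful
; value.)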
define i64 @f2(i64 %dummy, i64 *%src) {
; CHECK-LABEL: f2:
; CHECK: lg %r2, 0(%r3)
; CHECK: [[LABEL:\.[^ ]*]]:
; CHECK: lgr %r0, %r2
; CHECK: xilf %r0, 1
; CHECK: csg %r2, %r0, 0(%r3)
; CHECK: jl [[LABEL]]
; CHECK: br %r14
  %res = atomicrmw xor i64 *%src, i64 1 seq_cst
  ret i64 %res
}

; Check the high end of the XILF range.
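; (4294967295 is 0xffffffff, the largest value whose set bits all fit
; in the low 32-bit half.)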
define i64 @f3(i64 %dummy, i64 *%src) {
; CHECK-LABEL: f3:
; CHECK: xilf %r0, 4294967295
; CHECK: br %r14
  %res = atomicrmw xor i64 *%src, i64 4294967295 seq_cst
  ret i64 %res
}

; Check the lowest useful XIHF value, which is one greater than above.
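; (4294967296 is 0x100000000: the low half is zero and the high half
; is 1, so a single XIHF with immediate 1 performs the whole XOR.)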
define i64 @f4(i64 %dummy, i64 *%src) {
; CHECK-LABEL: f4:
; CHECK: xihf %r0, 1
; CHECK: br %r14
  %res = atomicrmw xor i64 *%src, i64 4294967296 seq_cst
  ret i64 %res
}

; Check the next value up, which must use a register. (We could use
; combinations of XIH* and XIL* instead, but that isn't implemented.)
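; (4294967297 is 0x100000001, which has nonzero bits in both the high
; and the low half.)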
define i64 @f5(i64 %dummy, i64 *%src) {
; CHECK-LABEL: f5:
; CHECK: xgr
; CHECK: br %r14
  %res = atomicrmw xor i64 *%src, i64 4294967297 seq_cst
  ret i64 %res
}

; Check the high end of the XIHF range.
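; (-4294967296 is 0xffffffff00000000 as a 64-bit value: the high half
; is all ones and the low half is zero.)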
define i64 @f6(i64 %dummy, i64 *%src) {
; CHECK-LABEL: f6:
; CHECK: xihf %r0, 4294967295
; CHECK: br %r14
  %res = atomicrmw xor i64 *%src, i64 -4294967296 seq_cst
  ret i64 %res
}

; Check the next value up, which must use a register.
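; (-4294967295 is 0xffffffff00000001, which again has nonzero bits in
; both halves.)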
define i64 @f7(i64 %dummy, i64 *%src) {
; CHECK-LABEL: f7:
; CHECK: xgr
; CHECK: br %r14
  %res = atomicrmw xor i64 *%src, i64 -4294967295 seq_cst
  ret i64 %res
}