; Test 32-bit signed comparison in which the second operand is a variable.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i32 @foo()

; Check register comparison.
define double @f1(double %a, double %b, i32 %i1, i32 %i2) {
; CHECK-LABEL: f1:
; CHECK: crjl %r2, %r3
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %cond = icmp slt i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}
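
; The memory tests below step across the compare displacement ranges: C is
; assumed to take a 12-bit unsigned displacement (0..4095) and CY a 20-bit
; signed displacement (-524288..524287), which is why the boundary offsets
; 4092, 4096, 524284, -524288 and -524292 appear in f2-f9 and why
; out-of-range offsets are reached by adjusting the base with AGFI first.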

; Check the low end of the C range.
define double @f2(double %a, double %b, i32 %i1, i32 *%ptr) {
; CHECK-LABEL: f2:
; CHECK: c %r2, 0(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %i2 = load i32 *%ptr
  %cond = icmp slt i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the high end of the aligned C range.
define double @f3(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK-LABEL: f3:
; CHECK: c %r2, 4092(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 1023
  %i2 = load i32 *%ptr
  %cond = icmp slt i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the next word up, which should use CY instead of C.
define double @f4(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK-LABEL: f4:
; CHECK: cy %r2, 4096(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 1024
  %i2 = load i32 *%ptr
  %cond = icmp slt i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the high end of the aligned CY range.
define double @f5(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK-LABEL: f5:
; CHECK: cy %r2, 524284(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 131071
  %i2 = load i32 *%ptr
  %cond = icmp slt i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f6(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK-LABEL: f6:
; CHECK: agfi %r3, 524288
; CHECK: c %r2, 0(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 131072
  %i2 = load i32 *%ptr
  %cond = icmp slt i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the high end of the negative aligned CY range.
define double @f7(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK-LABEL: f7:
; CHECK: cy %r2, -4(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 -1
  %i2 = load i32 *%ptr
  %cond = icmp slt i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the low end of the CY range.
define double @f8(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK-LABEL: f8:
; CHECK: cy %r2, -524288(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 -131072
  %i2 = load i32 *%ptr
  %cond = icmp slt i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the next word down, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f9(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK-LABEL: f9:
; CHECK: agfi %r3, -524292
; CHECK: c %r2, 0(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 -131073
  %i2 = load i32 *%ptr
  %cond = icmp slt i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}
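
; f10 and f11 below build the address from separate base and index values
; (explicit i64 adds plus an inttoptr rather than a getelementptr), the idea
; being to leave a base register, an index register and a displacement for
; the compare to fold into its addressing mode.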

; Check that C allows an index.
define double @f10(double %a, double %b, i32 %i1, i64 %base, i64 %index) {
; CHECK-LABEL: f10:
; CHECK: c %r2, 4092({{%r4,%r3|%r3,%r4}})
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %add1 = add i64 %base, %index
  %add2 = add i64 %add1, 4092
  %ptr = inttoptr i64 %add2 to i32 *
  %i2 = load i32 *%ptr
  %cond = icmp slt i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check that CY allows an index.
define double @f11(double %a, double %b, i32 %i1, i64 %base, i64 %index) {
; CHECK-LABEL: f11:
; CHECK: cy %r2, 4096({{%r4,%r3|%r3,%r4}})
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %add1 = add i64 %base, %index
  %add2 = add i64 %add1, 4096
  %ptr = inttoptr i64 %add2 to i32 *
  %i2 = load i32 *%ptr
  %cond = icmp slt i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; The first branch here got recreated by InsertBranch while splitting the
; critical edge %entry->%while.body, which lost the kills information for CC.
define void @f12(i32 %a, i32 %b) {
; CHECK-LABEL: f12:
; CHECK: cije %r2, 0
; CHECK: crjlh %r2,
; CHECK: br %r14
entry:
  %cmp11 = icmp eq i32 %a, 0
  br i1 %cmp11, label %while.end, label %while.body

while.body:
  %c = call i32 @foo()
  %cmp12 = icmp eq i32 %c, %b
  br i1 %cmp12, label %while.end, label %while.body

while.end:
  ret void
}