Implement PPC counter loops as a late IR-level pass
The old PPCCTRLoops pass, like the Hexagon pass version from which it was
derived, could only handle some simple loops in canonical form. We cannot
directly adapt the new Hexagon hardware loops pass, however, because the
Hexagon pass contains a fundamental assumption that non-constant-trip-count
loops will contain a guard, and this is not always true (the result being that
incorrect negative counts can be generated). With this commit, we replace the
pass with a late IR-level pass that uses SE (ScalarEvolution) to calculate the
backedge-taken counts and to safely generate the loop-count expressions
(including any necessary max() parts). This IR-level pass inserts custom
intrinsics that are later lowered into the desired decrement-and-branch
instructions.
The most fragile part of this new implementation is that interfering uses of
the counter register must be detected at the IR level (on PPC this includes
indirect branches as well as function calls). Also, to make all of this work,
we need a variant of the mtctr instruction that is marked as having side
effects; without it, machine-level CSE, DCE, etc. would illegally transform
the resulting code. Hopefully, this can be improved in the future.
This new pass is smaller than the original (and much smaller than the new
Hexagon hardware loops pass), and can handle many additional cases correctly.
In addition, the preheader-creation code has been copied from LoopSimplify;
once we decide where it belongs, this code will be refactored so that it can
be explicitly shared (making this implementation even smaller).
The new test-case files ctrloop-{le,lt,ne}.ll have been adapted from tests for
the new Hexagon pass. There are a few classes of loops that this pass does not
transform (noted by FIXMEs in the files), but these deficiencies can be
addressed within the SE infrastructure (thus helping many other passes as well).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@181927 91177308-0d34-0410-b5e6-96231b3b80d8
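As a rough illustration (not part of the original commit message or of the
test below), a transformed loop is expected to end up in roughly the form
sketched next. The intrinsic names, signatures, and branch orientation shown
here are assumptions inferred from the description above, not copied from the
pass source; the sketch also assumes %count is at least 1.

declare void @llvm.ppc.mtctr.i64(i64)
declare i1 @llvm.ppc.is.decremented.ctr.nonzero()

; Hypothetical hand-written sketch: the SE-computed count is moved into CTR in
; the preheader, and the latch condition is replaced by a decrement-and-test
; intrinsic that the backend lowers to a bdnz-style branch. The true successor
; stays in the loop because the intrinsic reports whether the decremented CTR
; is still nonzero.
define void @ctrloop_sketch(i8* nocapture %p, i64 %count) nounwind {
entry:
  call void @llvm.ppc.mtctr.i64(i64 %count)
  br label %loop

loop:                                             ; preds = %loop, %entry
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %addr = getelementptr inbounds i8* %p, i64 %i
  store i8 0, i8* %addr, align 1
  %i.next = add i64 %i, 1
  %nonzero = call i1 @llvm.ppc.is.decremented.ctr.nonzero()
  br i1 %nonzero, label %loop, label %exit

exit:                                             ; preds = %loop
  ret void
}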
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

; RUN: llc < %s -march=ppc64 | FileCheck %s
; XFAIL: *
; SE needs improvement

; CHECK: test_pos1_ir_sle
; CHECK: bdnz
; a < b
define void @test_pos1_ir_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 28395, %b
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ 28395, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 1
  %cmp = icmp sle i32 %inc, %b
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos2_ir_sle
; CHECK: bdnz
; a < b
define void @test_pos2_ir_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 9073, %b
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ 9073, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 2
  %cmp = icmp sle i32 %inc, %b
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos4_ir_sle
; CHECK: bdnz
; a < b
define void @test_pos4_ir_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 21956, %b
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ 21956, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 4
  %cmp = icmp sle i32 %inc, %b
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos8_ir_sle
; CHECK: bdnz
; a < b
define void @test_pos8_ir_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 16782, %b
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ 16782, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 8
  %cmp = icmp sle i32 %inc, %b
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos16_ir_sle
; CHECK: bdnz
; a < b
define void @test_pos16_ir_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 19097, %b
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ 19097, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 16
  %cmp = icmp sle i32 %inc, %b
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos1_ri_sle
; CHECK: bdnz
; a < b
define void @test_pos1_ri_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 %a, 14040
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 1
  %cmp = icmp sle i32 %inc, 14040
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos2_ri_sle
; CHECK: bdnz
; a < b
define void @test_pos2_ri_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 %a, 13710
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 2
  %cmp = icmp sle i32 %inc, 13710
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos4_ri_sle
; CHECK: bdnz
; a < b
define void @test_pos4_ri_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 %a, 9920
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 4
  %cmp = icmp sle i32 %inc, 9920
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos8_ri_sle
; CHECK: bdnz
; a < b
define void @test_pos8_ri_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 %a, 18924
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 8
  %cmp = icmp sle i32 %inc, 18924
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos16_ri_sle
; CHECK: bdnz
; a < b
define void @test_pos16_ri_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 %a, 11812
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 16
  %cmp = icmp sle i32 %inc, 11812
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos1_rr_sle
; FIXME: Support this loop!
; CHECK-NOT: bdnz
; a < b
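; Note (an assumption added for illustration, not part of the original test):
; in the rr variants below both the starting value (%a) and the bound (%b) are
; unknown, and the natural trip count of an sle-exit loop with unit step,
;   trip count = (%b + 1) - %a   (given the entry guard %a <= %b),
; involves %b + 1, which can wrap when %b is INT32_MAX. Presumably SE cannot
; yet prove that safe here, hence the FIXME and CHECK-NOT lines on these
; functions (and the XFAIL above, pending the SE improvements the commit
; message mentions).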
define void @test_pos1_rr_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 %a, %b
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 1
  %cmp = icmp sle i32 %inc, %b
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos2_rr_sle
; FIXME: Support this loop!
; CHECK-NOT: bdnz
; a < b
define void @test_pos2_rr_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 %a, %b
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 2
  %cmp = icmp sle i32 %inc, %b
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos4_rr_sle
; FIXME: Support this loop!
; CHECK-NOT: bdnz
; a < b
define void @test_pos4_rr_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 %a, %b
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 4
  %cmp = icmp sle i32 %inc, %b
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos8_rr_sle
; FIXME: Support this loop!
; CHECK-NOT: bdnz
; a < b
define void @test_pos8_rr_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 %a, %b
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 8
  %cmp = icmp sle i32 %inc, %b
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; CHECK: test_pos16_rr_sle
; FIXME: Support this loop!
; CHECK-NOT: bdnz
; a < b
define void @test_pos16_rr_sle(i8* nocapture %p, i32 %a, i32 %b) nounwind {
entry:
  %cmp3 = icmp sle i32 %a, %b
  br i1 %cmp3, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
  %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
  %0 = load i8* %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nsw i32 %conv, 1
  %conv1 = trunc i32 %add to i8
  store i8 %conv1, i8* %arrayidx, align 1
  %inc = add nsw i32 %i.04, 16
  %cmp = icmp sle i32 %inc, %b
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}