mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-13 20:32:21 +00:00
commit f2282cac95
Unfortunately, this in turn led to some lower-quality SCEVs due to some
different paths through expression simplification, so add getUDivExactExpr
and use it. This fixes all instances of the problems that I found, but we
can make that function smarter as necessary.

Merge test "xor-and.ll" into "and-xor.ll" since I needed to update it
anyways.

Test 'nsw-offset.ll' analyzes a little deeper: %n now gets a SCEV in terms
of %no instead of a SCEVUnknown.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200203 91177308-0d34-0410-b5e6-96231b3b80d8
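A minimal sketch of the getUDivExactExpr idea, assuming a ScalarEvolution &SE
and an integer-typed const SCEV *X already in scope (illustrative only, not
code from this commit): unlike getUDivExpr, the exact form may cancel a known
common factor instead of leaving an opaque division behind.

    // (2 * X) /u 2: the dividend is a known multiple of the divisor,
    // so the exact division can simplify back to X.
    const SCEV *Two   = SE.getConstant(X->getType(), 2);
    const SCEV *Mul   = SE.getMulExpr(Two, X);
    const SCEV *Exact = SE.getUDivExactExpr(Mul, Two); // may fold to X
    const SCEV *Plain = SE.getUDivExpr(Mul, Two);      // may stay ((2 * X) /u 2)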
78 lines · 3.2 KiB · LLVM
; RUN: opt < %s -S -analyze -scalar-evolution | FileCheck %s

; ScalarEvolution should be able to fold away the sign-extensions
; on this loop with a primary induction variable incremented by
; an nsw add of 2.
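;
; A rough C equivalent of the loop below, as a reading aid; this is a
; reconstruction, not necessarily the source the test was generated from:
;
;   void foo(int no, double *d, double *q) {
;     int n = no & ~1;                    // %n: force n even
;     for (int i = 0; i < n; i += 2)
;       d[i] *= q[i] + q[i | 1] + 3.2;    // q[i | 1] == q[i + 1]: i is even
;   }
;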
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"

define void @foo(i32 %no, double* nocapture %d, double* nocapture %q) nounwind {
entry:
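; 4294967294 is 0xFFFFFFFE, i.e. -2: the 'and' clears the low bit, so %n
; is the even value 2 * (%no /u 2) that the backedge-taken counts at the
; bottom of this file are expressed in terms of.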
%n = and i32 %no, 4294967294
%0 = icmp sgt i32 %n, 0 ; <i1> [#uses=1]
br i1 %0, label %bb.nph, label %return

bb.nph: ; preds = %entry
br label %bb

bb: ; preds = %bb.nph, %bb1
%i.01 = phi i32 [ %16, %bb1 ], [ 0, %bb.nph ] ; <i32> [#uses=5]

; CHECK: %1 = sext i32 %i.01 to i64
; CHECK: --> {0,+,2}<nuw><nsw><%bb>
%1 = sext i32 %i.01 to i64 ; <i64> [#uses=1]

; CHECK: %2 = getelementptr inbounds double* %d, i64 %1
; CHECK: --> {%d,+,16}<nsw><%bb>
%2 = getelementptr inbounds double* %d, i64 %1 ; <double*> [#uses=1]
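; (The stride of 16 above is 2 doubles per iteration times 8 bytes per
; double.)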
%3 = load double* %2, align 8 ; <double> [#uses=1]
%4 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
%5 = getelementptr inbounds double* %q, i64 %4 ; <double*> [#uses=1]
%6 = load double* %5, align 8 ; <double> [#uses=1]
%7 = or i32 %i.01, 1 ; <i32> [#uses=1]

; CHECK: %8 = sext i32 %7 to i64
; CHECK: --> {1,+,2}<nuw><nsw><%bb>
%8 = sext i32 %7 to i64 ; <i64> [#uses=1]

; CHECK: %9 = getelementptr inbounds double* %q, i64 %8
; CHECK: {(8 + %q),+,16}<nsw><%bb>
%9 = getelementptr inbounds double* %q, i64 %8 ; <double*> [#uses=1]

; Artificially repeat the above three instructions, this time using
; add nsw instead of or. Since %i.01 is always even (it starts at 0 and
; steps by 2), its low bit is clear, so "or %i.01, 1" computes the same
; value as "add nsw %i.01, 1" and both should get the same SCEV.
%t7 = add nsw i32 %i.01, 1 ; <i32> [#uses=1]

; CHECK: %t8 = sext i32 %t7 to i64
; CHECK: --> {1,+,2}<nuw><nsw><%bb>
%t8 = sext i32 %t7 to i64 ; <i64> [#uses=1]

; CHECK: %t9 = getelementptr inbounds double* %q, i64 %t8
; CHECK: {(8 + %q),+,16}<nsw><%bb>
%t9 = getelementptr inbounds double* %q, i64 %t8 ; <double*> [#uses=1]

%10 = load double* %9, align 8 ; <double> [#uses=1]
%11 = fadd double %6, %10 ; <double> [#uses=1]
%12 = fadd double %11, 3.200000e+00 ; <double> [#uses=1]
%13 = fmul double %3, %12 ; <double> [#uses=1]
%14 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
%15 = getelementptr inbounds double* %d, i64 %14 ; <double*> [#uses=1]
store double %13, double* %15, align 8
%16 = add nsw i32 %i.01, 2 ; <i32> [#uses=2]
br label %bb1

bb1: ; preds = %bb
%17 = icmp slt i32 %16, %n ; <i1> [#uses=1]
br i1 %17, label %bb, label %bb1.return_crit_edge

bb1.return_crit_edge: ; preds = %bb1
br label %return

return: ; preds = %bb1.return_crit_edge, %entry
ret void
}
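; How the counts below fall out: %n is the even value 2 * (%no /u 2) and
; the IV steps 0, 2, 4, ..., so the backedge executes ((%n - 1) /u 2)
; times. %n is even, positive, and at most 0x7FFFFFFE = 2147483646, so
; that is at most (2147483646 - 1) /u 2 = 1073741822.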
; CHECK: Loop %bb: backedge-taken count is ((-1 + (2 * (%no /u 2))) /u 2)
; CHECK: Loop %bb: max backedge-taken count is 1073741822