mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-12-25 14:32:53 +00:00
2fabd464ae
ops. This is a rewrite of the IV simplification algorithm used by -disable-iv-rewrite. To avoid perturbing the default mode, I temporarily split the driver and created SimplifyIVUsersNoRewrite. The idea is to avoid doing opcode/pattern matching inside IndVarSimplify. SCEV already does it. We want to optimize with the full generality of SCEV, but optimize def-use chains top down on-demand rather than rewriting the entire expression bottom-up. This was easy to do for operations that SCEV can prove are identity function. So we're now eliminating bitmasks and zero extends this way. A result of this rewrite is that indvars -disable-iv-rewrite no longer requires IVUsers. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@133502 91177308-0d34-0410-b5e6-96231b3b80d8
35 lines
931 B
LLVM
; Test that IndVarSimplify eliminates redundant IV truncation masks.
; Both the default pipeline and -disable-iv-rewrite (the SCEV-based,
; top-down IV simplification) must remove every 'and' mask and avoid
; introducing any 'zext', because SCEV proves the masks are identity
; operations on this IV's value range.
; RUN: opt < %s -indvars -S | FileCheck %s
; RUN: opt < %s -indvars -disable-iv-rewrite -S | FileCheck %s
; CHECK-NOT: and
; CHECK-NOT: zext

target datalayout = "-p:64:64:64-n:32:64"

define void @foo(double* %d, i64 %n) nounwind {
entry:
  br label %loop

loop:
  ; Canonical IV counting 0..9; all masks below are no-ops on [0,9].
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
  ; Mask to 8 bits — identity for indvar < 256; indvars must delete it.
  %indvar.i8 = and i64 %indvar, 255
  %t0 = getelementptr double* %d, i64 %indvar.i8
  %t1 = load double* %t0
  %t2 = fmul double %t1, 0.1
  store double %t2, double* %t0
  ; Mask to 24 bits — also identity on this IV's range.
  %indvar.i24 = and i64 %indvar, 16777215
  %t3 = getelementptr double* %d, i64 %indvar.i24
  %t4 = load double* %t3
  %t5 = fmul double %t4, 2.3
  store double %t5, double* %t3
  ; Unmasked use of the IV for comparison with the masked GEPs above.
  %t6 = getelementptr double* %d, i64 %indvar
  %t7 = load double* %t6
  %t8 = fmul double %t7, 4.5
  store double %t8, double* %t6
  %indvar.next = add i64 %indvar, 1
  ; Fixed trip count of 10 keeps the IV well inside both mask widths.
  %exitcond = icmp eq i64 %indvar.next, 10
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}