Mirror of https://github.com/c64scene-ar/llvm-6502.git
Commit 5fd5b125ff: For example, on a 32-bit architecture, don't promote all uses of the IV to 64 bits just because one use is a 64-bit cast. Alternate implementation of the patch by Arnaud de Grandmaison.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@127884 91177308-0d34-0410-b5e6-96231b3b80d8
; RUN: opt < %s -indvars -S > %t
; RUN: not grep and %t
; RUN: not grep zext %t
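;
; The induction variable feeds an 8-bit masked use, a 24-bit masked use, and a
; full 64-bit use; after -indvars, no 'and' or 'zext' instructions should
; remain in the output.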

target datalayout = "-p:64:64:64-n:32:64"

define void @foo(double* %d, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
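  ; Use of the IV masked to 8 bits.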
  %indvar.i8 = and i64 %indvar, 255
  %t0 = getelementptr double* %d, i64 %indvar.i8
  %t1 = load double* %t0
  %t2 = fmul double %t1, 0.1
  store double %t2, double* %t0
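  ; Use of the IV masked to 24 bits.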
  %indvar.i24 = and i64 %indvar, 16777215
  %t3 = getelementptr double* %d, i64 %indvar.i24
  %t4 = load double* %t3
  %t5 = fmul double %t4, 2.3
  store double %t5, double* %t3
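  ; Full-width (i64) use of the IV.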
  %t6 = getelementptr double* %d, i64 %indvar
  %t7 = load double* %t6
  %t8 = fmul double %t7, 4.5
  store double %t8, double* %t6
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 10
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}