Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-17 03:30:28 +00:00
Commit ae3a0be92e
Split the Add, Sub, and Mul instruction opcodes into separate integer and floating-point opcodes, introducing FAdd, FSub, and FMul. For now, the AsmParser, BitcodeReader, and IRBuilder all preserve backwards compatibility, and the Core LLVM APIs preserve backwards compatibility for IR producers. Most front-ends won't need to change immediately. This implements the first step of the plan outlined here: http://nondot.org/sabre/LLVMNotes/IntegerOverflow.txt

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@72897 91177308-0d34-0410-b5e6-96231b3b80d8
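As a rough illustration of the split (an assumed sketch, not code from this commit; the function @example and its operands are hypothetical): integer arithmetic keeps the add/sub/mul opcodes, while floating-point arithmetic now uses fadd/fsub/fmul, the same spelling the fmul instructions in the test below already use.

; Illustrative sketch only: integer vs. floating-point opcodes after the split.
define double @example(i32 %a, i32 %b, double %x, double %y) nounwind {
entry:
  %isum = add i32 %a, %b          ; integer addition still uses 'add'
  %fsum = fadd double %x, %y      ; floating-point addition uses the new 'fadd'
  %fprod = fmul double %fsum, %y  ; floating-point multiplication uses the new 'fmul'
  ret double %fprod
}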
34 lines · 880 B · LLVM
; RUN: llvm-as < %s | opt -indvars | llvm-dis > %t
; RUN: not grep and %t
; RUN: not grep zext %t
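; The trip count is 10, so %indvar never exceeds 9 and the 'and' masks below
; are no-ops; indvars is expected to eliminate them without introducing zext
; casts, which is what the two greps above verify.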
target datalayout = "-p:64:64:64"

define void @foo(double* %d, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
  %indvar.i8 = and i64 %indvar, 255
  %t0 = getelementptr double* %d, i64 %indvar.i8
  %t1 = load double* %t0
  %t2 = fmul double %t1, 0.1
  store double %t2, double* %t0
  %indvar.i24 = and i64 %indvar, 16777215
  %t3 = getelementptr double* %d, i64 %indvar.i24
  %t4 = load double* %t3
  %t5 = fmul double %t4, 2.3
  store double %t5, double* %t3
  %t6 = getelementptr double* %d, i64 %indvar
  %t7 = load double* %t6
  %t8 = fmul double %t7, 4.5
  store double %t8, double* %t6
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 10
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}