Mirror of https://github.com/c64scene-ar/llvm-6502.git

Commit ae3a0be92e
Split the Add, Sub, and Mul instruction opcodes into separate integer and floating-point opcodes, introducing FAdd, FSub, and FMul. For now, the AsmParser, BitcodeReader, and IRBuilder all preserve backwards compatibility, and the Core LLVM APIs preserve backwards compatibility for IR producers. Most front-ends won't need to change immediately. This implements the first step of the plan outlined here: http://nondot.org/sabre/LLVMNotes/IntegerOverflow.txt

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@72897 91177308-0d34-0410-b5e6-96231b3b80d8
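As a minimal sketch of what the split means at the IR level (the function name and values below are illustrative, not taken from the commit): floating-point arithmetic that was previously written with the shared mul/add/sub opcodes now uses the dedicated fmul/fadd/fsub opcodes, while the plain spellings remain for integers:

    define float @example(float %x, float %y, i32 %a, i32 %b) {
      %i = mul i32 %a, %b            ; integer multiply keeps the plain opcode
      %f = sitofp i32 %i to float    ; convert for the FP side of the example
      %p = fmul float %x, %y         ; floating-point multiply: new opcode
      %q = fadd float %p, %f         ; floating-point add: new opcode
      %r = fsub float %q, %y         ; floating-point subtract: new opcode
      ret float %r
    }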
19 lines | 909 B | LLVM
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep li.*16
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | not grep addi

; Codegen lvx (R+16) as t = li 16, lvx t,R
; This shares the 16 between the two loads.
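; A sketch of the shape being tested for (register names illustrative,
; not actual llc output): the 16 is materialized once with li and then
; reused as the index register of both lvx loads,
;   li rC, 16
;   lvx vX, rC, rB
;   lvx vY, rC, rA
; rather than being folded into a separate addi for each address.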
define void @func(<4 x float>* %a, <4 x float>* %b) {
  %tmp1 = getelementptr <4 x float>* %b, i32 1    ; <<4 x float>*> [#uses=1]
  %tmp = load <4 x float>* %tmp1                  ; <<4 x float>> [#uses=1]
  %tmp3 = getelementptr <4 x float>* %a, i32 1    ; <<4 x float>*> [#uses=1]
  %tmp4 = load <4 x float>* %tmp3                 ; <<4 x float>> [#uses=1]
  %tmp5 = fmul <4 x float> %tmp, %tmp4            ; <<4 x float>> [#uses=1]
  %tmp8 = load <4 x float>* %b                    ; <<4 x float>> [#uses=1]
  %tmp9 = fadd <4 x float> %tmp5, %tmp8           ; <<4 x float>> [#uses=1]
  store <4 x float> %tmp9, <4 x float>* %a
  ret void
}