mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-11-01 15:11:24 +00:00
7c9c6ed761
Essentially the same as the GEP change in r230786. A similar migration script can be used to update test cases, though a few more test case improvements/changes were required this time around: (r229269-r229278) import fileinput import sys import re pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)") for line in sys.stdin: sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line)) Reviewers: rafael, dexonsmith, grosser Differential Revision: http://reviews.llvm.org/D7649 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@230794 91177308-0d34-0410-b5e6-96231b3b80d8
60 lines
2.3 KiB
LLVM
; RUN: llc -O0 -verify-machineinstrs -mtriple=armv7-apple-darwin < %s
; RUN: llc -O0 -verify-machineinstrs -mtriple=armv7-linux-gnueabi < %s
; Entry point: loads an (uninitialized) <4 x i32> stack slot and passes it,
; together with a null pointer and a constant offset, to the inlinehint
; helper @__aa. At -O0 this exercises vector argument lowering on ARM
; without crashing the fast instruction selector.
define i32 @main() nounwind ssp {
entry:
  %retval = alloca i32, align 4
  %X = alloca <4 x i32>, align 16
  %Y = alloca <4 x float>, align 16
  store i32 0, i32* %retval
  ; %X is never initialized; only the lowering of the load/call matters here.
  %tmp = load <4 x i32>, <4 x i32>* %X, align 16
  call void @__aa(<4 x i32> %tmp, i8* null, i32 3, <4 x float>* %Y)
  %0 = load i32, i32* %retval
  ret i32 %0
}
; Helper 1: spills every argument to the stack (typical -O0 codegen shape),
; round-trips the vector through a bitcast pair, converts it to float, and
; forwards everything to @__bb. The redundant spill/reload/bitcast sequence
; is intentional — it mirrors unoptimized front-end output.
define internal void @__aa(<4 x i32> %v, i8* %p, i32 %offset, <4 x float>* %constants) nounwind inlinehint ssp {
entry:
  %__a.addr.i = alloca <4 x i32>, align 16
  %v.addr = alloca <4 x i32>, align 16
  %p.addr = alloca i8*, align 4
  %offset.addr = alloca i32, align 4
  %constants.addr = alloca <4 x float>*, align 4
  ; Spill incoming arguments.
  store <4 x i32> %v, <4 x i32>* %v.addr, align 16
  store i8* %p, i8** %p.addr, align 4
  store i32 %offset, i32* %offset.addr, align 4
  store <4 x float>* %constants, <4 x float>** %constants.addr, align 4
  ; Reload the vector via an extra stack slot (inlined-callee residue).
  %tmp = load <4 x i32>, <4 x i32>* %v.addr, align 16
  store <4 x i32> %tmp, <4 x i32>* %__a.addr.i, align 16
  %tmp.i = load <4 x i32>, <4 x i32>* %__a.addr.i, align 16
  ; No-op bitcast round-trip, then signed int -> float conversion.
  %0 = bitcast <4 x i32> %tmp.i to <16 x i8>
  %1 = bitcast <16 x i8> %0 to <4 x i32>
  %vcvt.i = sitofp <4 x i32> %1 to <4 x float>
  ; Reload the scalar/pointer arguments and forward the call.
  %tmp1 = load i8*, i8** %p.addr, align 4
  %tmp2 = load i32, i32* %offset.addr, align 4
  %tmp3 = load <4 x float>*, <4 x float>** %constants.addr, align 4
  call void @__bb(<4 x float> %vcvt.i, i8* %tmp1, i32 %tmp2, <4 x float>* %tmp3)
  ret void
}
; Helper 2: spills its arguments, reads an (uninitialized) i64 local, and
; stores it to p + offset reinterpreted as an i64*. The stored value is
; meaningless; the test only cares that this address arithmetic and the
; i64 store lower correctly at -O0.
define internal void @__bb(<4 x float> %v, i8* %p, i32 %offset, <4 x float>* %constants) nounwind inlinehint ssp {
entry:
  %v.addr = alloca <4 x float>, align 16
  %p.addr = alloca i8*, align 4
  %offset.addr = alloca i32, align 4
  %constants.addr = alloca <4 x float>*, align 4
  %data = alloca i64, align 4
  ; Spill incoming arguments.
  store <4 x float> %v, <4 x float>* %v.addr, align 16
  store i8* %p, i8** %p.addr, align 4
  store i32 %offset, i32* %offset.addr, align 4
  store <4 x float>* %constants, <4 x float>** %constants.addr, align 4
  ; %data is never written before this load (intentional in the test).
  %tmp = load i64, i64* %data, align 4
  %tmp1 = load i8*, i8** %p.addr, align 4
  %tmp2 = load i32, i32* %offset.addr, align 4
  ; Compute (i64 *)(p + offset) and store the i64 through it.
  %add.ptr = getelementptr i8, i8* %tmp1, i32 %tmp2
  %0 = bitcast i8* %add.ptr to i64*
  %arrayidx = getelementptr inbounds i64, i64* %0, i32 0
  store i64 %tmp, i64* %arrayidx
  ret void
}