mirror of https://github.com/c64scene-ar/llvm-6502.git
synced 2024-12-21 00:32:23 +00:00
4a544a79bd
Split the init.trampoline intrinsic, which currently combines GCC's init.trampoline and adjust.trampoline intrinsics, into two intrinsics like in GCC. While having one combined intrinsic is tempting, it is not natural, because typically the trampoline initialization needs to be done in one function, while the result of adjust.trampoline is needed in a different (nested) function.

To get around this, llvm-gcc hacks the nested-function lowering code to insert an additional parent variable holding the adjust.trampoline result, which can be accessed from the child function. Dragonegg doesn't have the luxury of tweaking GCC code, so it stored the result of adjust.trampoline in the memory GCC set aside for the trampoline itself (this is always available in the child function) and set up some new memory (using an alloca) to hold the trampoline. Unfortunately this breaks Go, which allocates trampoline memory on the heap and wants to use it even after the parent has exited (!). Rather than doing even more hacks to get Go working, it seemed best to just use two intrinsics like in GCC. Patch mostly by Sanjoy Das.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@139140 91177308-0d34-0410-b5e6-96231b3b80d8
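To make the new pattern concrete, here is a minimal sketch of how the split pair is intended to be used, assuming LLVM 3.0-era typed-pointer IR. The @parent and @nested names, the 32-byte trampoline buffer, and the null nest value are illustrative placeholders, not part of this commit or its test:

; Minimal sketch only: placeholder names and sizes, not code from this commit.
declare void @llvm.init.trampoline(i8*, i8*, i8*)
declare i8* @llvm.adjust.trampoline(i8*)

declare i32 @nested(i8* nest, i32)

define i32 @parent(i32 %x) {
  ; Stack memory for the trampoline; a real frontend sizes and aligns this
  ; for the target, so the 32 bytes here are only a placeholder.
  %tramp = alloca [32 x i8], align 4
  %tp = getelementptr [32 x i8]* %tramp, i32 0, i32 0
  ; Fill in the trampoline: the target function plus the nest (static chain)
  ; value, shown here as null for simplicity.
  call void @llvm.init.trampoline(i8* %tp,
                                  i8* bitcast (i32 (i8*, i32)* @nested to i8*),
                                  i8* null)
  ; Perform any machine-specific adjustment and obtain the callable pointer.
  %fp = call i8* @llvm.adjust.trampoline(i8* %tp)
  %f = bitcast i8* %fp to i32 (i32)*
  ; The nest parameter is supplied by the trampoline, so it is not passed here.
  %r = call i32 %f(i32 %x)
  ret i32 %r
}

With two separate intrinsics there is no need for dragonegg's extra alloca: the trampoline can live wherever the frontend puts it, including Go's heap allocations that must outlive the parent frame.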
56 lines
2.0 KiB
LLVM
; Tests to make sure intrinsics are automatically upgraded.
; RUN: llvm-as < %s | llvm-dis | FileCheck %s
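
; The SSE unaligned-load intrinsics below no longer exist; the auto-upgrader
; rewrites calls to them as plain loads with align 1, as the CHECKs verify.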
declare <4 x float> @llvm.x86.sse.loadu.ps(i8*) nounwind readnone
declare <16 x i8> @llvm.x86.sse2.loadu.dq(i8*) nounwind readnone
declare <2 x double> @llvm.x86.sse2.loadu.pd(double*) nounwind readnone
define void @test_loadu(i8* %a, double* %b) {
  %v0 = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %a)
  %v1 = call <16 x i8> @llvm.x86.sse2.loadu.dq(i8* %a)
  %v2 = call <2 x double> @llvm.x86.sse2.loadu.pd(double* %b)

; CHECK: load i128* {{.*}}, align 1
; CHECK: load i128* {{.*}}, align 1
; CHECK: load i128* {{.*}}, align 1
  ret void
}
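
; The old non-temporal store intrinsics are upgraded to ordinary stores
; carrying nontemporal metadata, which the CHECK patterns match.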
declare void @llvm.x86.sse.movnt.ps(i8*, <4 x float>) nounwind readnone
declare void @llvm.x86.sse2.movnt.dq(i8*, <2 x double>) nounwind readnone
declare void @llvm.x86.sse2.movnt.pd(i8*, <2 x double>) nounwind readnone
declare void @llvm.x86.sse2.movnt.i(i8*, i32) nounwind readnone

define void @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D) {
; CHECK: store{{.*}}nontemporal
  call void @llvm.x86.sse.movnt.ps(i8* %B, <4 x float> %A)
; CHECK: store{{.*}}nontemporal
  call void @llvm.x86.sse2.movnt.dq(i8* %B, <2 x double> %C)
; CHECK: store{{.*}}nontemporal
  call void @llvm.x86.sse2.movnt.pd(i8* %B, <2 x double> %C)
; CHECK: store{{.*}}nontemporal
  call void @llvm.x86.sse2.movnt.i(i8* %B, i32 %D)
  ret void
}
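
; llvm.prefetch gained a fourth operand selecting the cache (instruction vs.
; data); the upgrader appends i32 1 (data cache) to old three-operand calls.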
declare void @llvm.prefetch(i8*, i32, i32) nounwind

define void @p(i8* %ptr) {
; CHECK: llvm.prefetch(i8* %ptr, i32 0, i32 1, i32 1)
  tail call void @llvm.prefetch(i8* %ptr, i32 0, i32 1)
  ret void
}
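
; The old combined form of llvm.init.trampoline returned the adjusted function
; pointer; as described in the commit message above, it is now split into a
; void init.trampoline plus adjust.trampoline, and old calls are upgraded to
; use the pair, as the CHECKs verify.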
declare i32 @nest_f(i8* nest, i32)
declare i8* @llvm.init.trampoline(i8*, i8*, i8*)

define void @test_trampolines() {
; CHECK: call void @llvm.init.trampoline(i8* null, i8* bitcast (i32 (i8*, i32)* @nest_f to i8*), i8* null)
; CHECK: call i8* @llvm.adjust.trampoline(i8* null)

  call i8* @llvm.init.trampoline(i8* null,
                                 i8* bitcast (i32 (i8*, i32)* @nest_f to i8*),
                                 i8* null)
  ret void
}