mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-01-15 07:34:33 +00:00
72062f5744
This patch adds support for AArch64 (ARM's 64-bit architecture) to LLVM in the "experimental" category. Currently, it won't be built unless requested explicitly. This initial commit should have support for: + Assembly of all scalar (i.e. non-NEON, non-Crypto) instructions (except the late addition CRC instructions). + CodeGen features required for C++03 and C99. + Compilation for the "small" memory model: code+static data < 4GB. + Absolute and position-independent code. + GNU-style (i.e. "__thread") TLS. + Debugging information. The principal omission, currently, is performance tuning. This patch excludes the NEON support also reviewed due to an outbreak of batshit insanity in our legal department. That will be committed soon bringing the changes to precisely what has been approved. Further reviews would be gratefully received. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@174054 91177308-0d34-0410-b5e6-96231b3b80d8
134 lines
3.3 KiB
LLVM
; RUN: llc -march=aarch64 -verify-machineinstrs < %s | FileCheck %s

declare void @use_addr(i8*)

; Dynamic alloca of %n bytes: the variable-sized stack adjustment must be
; 16-byte aligned and performed by moving SP, with SP restored from the
; frame pointer (x29) in the epilogue.
define void @test_simple_alloca(i64 %n) {
; CHECK: test_simple_alloca:

  %buf = alloca i8, i64 %n
; Make sure we align the stack change to 16 bytes:
; CHECK: add [[SPDELTA:x[0-9]+]], x0, #15
; CHECK: and x0, [[SPDELTA]], #0xfffffffffffffff0

; Make sure we change SP. It would be surprising if anything but x0 were used
; for the final sp, but it could be if it was then moved into x0.
; CHECK: mov [[TMP:x[0-9]+]], sp
; CHECK: sub x0, [[TMP]], [[SPDELTA]]
; CHECK: mov sp, x0

  call void @use_addr(i8* %buf)
; CHECK: bl use_addr

  ret void
; Make sure epilogue restores sp from fp
; CHECK: sub sp, x29, #16
; CHECK: ldp x29, x30, [sp, #16]
; CHECK: add sp, sp, #32
; CHECK: ret
}

declare void @use_addr_loc(i8*, i64*)

; Dynamic alloca alongside a fixed-size local: the local must be addressed
; relative to the frame pointer (x29) because SP moves by a variable amount.
define i64 @test_alloca_with_local(i64 %n) {
; CHECK: test_alloca_with_local:
; CHECK: sub sp, sp, #32
; CHECK: stp x29, x30, [sp, #16]

  %loc = alloca i64
  %buf = alloca i8, i64 %n
; Make sure we align the stack change to 16 bytes:
; CHECK: add [[SPDELTA:x[0-9]+]], x0, #15
; CHECK: and x0, [[SPDELTA]], #0xfffffffffffffff0

; Make sure we change SP. It would be surprising if anything but x0 were used
; for the final sp, but it could be if it was then moved into x0.
; CHECK: mov [[TMP:x[0-9]+]], sp
; CHECK: sub x0, [[TMP]], [[SPDELTA]]
; CHECK: mov sp, x0

; Obviously suboptimal code here, but it does get &local into x1:
; CHECK: sub [[TMP:x[0-9]+]], x29, [[LOC_FROM_FP:#[0-9]+]]
; CHECK: add x1, [[TMP]], #0

  call void @use_addr_loc(i8* %buf, i64* %loc)
; Check the full callee name so this doesn't merely prefix-match:
; CHECK: bl use_addr_loc

  %val = load i64* %loc
; CHECK: sub x[[TMP:[0-9]+]], x29, [[LOC_FROM_FP]]
; CHECK: ldr x0, [x[[TMP]]]

  ret i64 %val
; Make sure epilogue restores sp from fp
; CHECK: sub sp, x29, #16
; CHECK: ldp x29, x30, [sp, #16]
; CHECK: add sp, sp, #32
; CHECK: ret
}
; Dynamic alloca in a variadic function: the prologue must also spill the
; vector argument registers (q1..q7) into the register-save area before the
; variable-sized SP adjustment happens.
define void @test_variadic_alloca(i64 %n, ...) {
; CHECK: test_variadic_alloca:

; CHECK: sub sp, sp, #208
; CHECK: stp x29, x30, [sp, #192]
; CHECK: add x29, sp, #192
; CHECK: sub x9, x29, #192
; CHECK: add x8, x9, #0
; CHECK: str q7, [x8, #112]
; [...]
; CHECK: str q1, [x8, #16]

  %addr = alloca i8, i64 %n

  call void @use_addr(i8* %addr)
; CHECK: bl use_addr

  ret void
; CHECK: sub sp, x29, #192
; CHECK: ldp x29, x30, [sp, #192]
; CHECK: add sp, sp, #208
}
; A fixed frame too large for a single 12-bit immediate: the 8 MB static
; alloca forces a split SP adjustment (plain sub plus a shifted-immediate
; sub, "lsl #12"), followed by the dynamic alloca.
define void @test_alloca_large_frame(i64 %n) {
; CHECK: test_alloca_large_frame:

; CHECK: sub sp, sp, #496
; CHECK: stp x29, x30, [sp, #480]
; CHECK: add x29, sp, #480
; CHECK: sub sp, sp, #48
; CHECK: sub sp, sp, #1953, lsl #12

  %addr1 = alloca i8, i64 %n
  %addr2 = alloca i64, i64 1000000

  call void @use_addr_loc(i8* %addr1, i64* %addr2)

  ret void
; CHECK: sub sp, x29, #480
; CHECK: ldp x29, x30, [sp, #480]
; CHECK: add sp, sp, #496
}

declare i8* @llvm.stacksave()
declare void @llvm.stackrestore(i8*)

; llvm.stacksave / llvm.stackrestore around a dynamic alloca: SP must be
; captured into a register before the alloca and restored from that same
; register afterwards.
define void @test_scoped_alloca(i64 %n) {
; CHECK: test_scoped_alloca
; CHECK: sub sp, sp, #32

  %sp = call i8* @llvm.stacksave()
; CHECK: mov [[SAVED_SP:x[0-9]+]], sp

  %addr = alloca i8, i64 %n
; CHECK: and [[SPDELTA:x[0-9]+]], {{x[0-9]+}}, #0xfffffffffffffff0
; CHECK: mov [[OLDSP:x[0-9]+]], sp
; CHECK: sub [[NEWSP:x[0-9]+]], [[OLDSP]], [[SPDELTA]]
; CHECK: mov sp, [[NEWSP]]

  call void @use_addr(i8* %addr)
; CHECK: bl use_addr

  call void @llvm.stackrestore(i8* %sp)
; CHECK: mov sp, [[SAVED_SP]]

  ret void
}