See r230786 and r230794 for similar changes to gep and load respectively.

Call is a bit different because it often doesn't have a single explicit type - usually the type is deduced from the arguments, and just the return type is explicit. In those cases there's no need to change the IR.

When that's not the case, the IR usually contains the pointer type of the first operand - but since typed pointers are going away, that representation is insufficient, so I'm just stripping the "pointerness" of the explicit type away.

This does make the IR a bit weird - it /sort of/ reads like the type of the first operand: "call void () %x(" but %x is actually of type "void ()*" and will eventually be just of type "ptr". But this seems not too bad, and I don't think it would benefit from repeating the type ("void (), void () * %x(" and then eventually "void (), ptr %x(") as has been done with gep and load.

This also has a side benefit: since the explicit type is no longer a pointer, there's no ambiguity between an explicit type and a function that returns a function pointer. Previously this case needed an explicit type (eg: a function returning a void() function was written as "call void () () * @x(" rather than "call void () * @x(" because of the ambiguity between a function returning a pointer to a void() function and a function returning void). No ambiguity means even function pointer return types can be written alone, without writing the whole function's type.

This leaves /only/ the varargs case where the explicit type is required.

Given the special type syntax in call instructions, the regex-fu used for migration was a bit more involved in its own unique way (as every one of these is), so here it is. Use it in conjunction with the apply.sh script and associated find/xargs commands I've provided in r230786 to migrate your out-of-tree tests. Do let me know if any of this doesn't cover your cases & we can iterate on a more general script/regexes to help others with out-of-tree tests.

About 9 test cases couldn't be automatically migrated - half of those were functions returning function pointers, where I just had to manually delete the function argument types now that we didn't need an explicit function type there. The other half were typedefs of function types used in calls - just had to manually drop the * from those.

import fileinput
import sys
import re

pat = re.compile(r'((?:=|:|^|\s)call\s(?:[^@]*?))(\s*$|\s*(?:(?:\[\[[a-zA-Z0-9_]+\]\]|[@%](?:(")?[\\\?@a-zA-Z0-9_.]*?(?(3)"|)|{{.*}}))(?:\(|$)|undef|inttoptr|bitcast|null|asm).*$)')
addrspace_end = re.compile(r"addrspace\(\d+\)\s*\*$")
func_end = re.compile(r"(?:void.*|\)\s*)\*$")

def conv(match, line):
  if not match or re.search(addrspace_end, match.group(1)) or not re.search(func_end, match.group(1)):
    return line
  return line[:match.start()] + match.group(1)[:match.group(1).rfind('*')].rstrip() + match.group(2) + line[match.end():]

for line in sys.stdin:
  sys.stdout.write(conv(re.search(pat, line), line))

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@235145 91177308-0d34-0410-b5e6-96231b3b80d8
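For example, applied to a varargs intrinsic call like the ones in the test below, the script strips the trailing "*" from the explicit function type. A minimal before/after sketch (the "before" line is the pre-change syntax, reconstructed here for illustration):

; before: explicit pointer-to-function type
call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
; after: pointerness stripped; the function type itself remains (varargs still requires it)
call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)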
; RUN: llc -mtriple=arm64-apple-darwin -enable-misched=0 -mcpu=cyclone < %s | FileCheck %s
; RUN: llc -mtriple=arm64-apple-darwin -enable-misched=0 -mcpu=cyclone -fast-isel -fast-isel-abort=1 < %s | FileCheck %s

; Trivial patchpoint codegen
;
define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: trivial_patchpoint_codegen:
; CHECK: movz x16, #0xdead, lsl #32
; CHECK-NEXT: movk x16, #0xbeef, lsl #16
; CHECK-NEXT: movk x16, #0xcafe
; CHECK-NEXT: blr x16
; CHECK: movz x16, #0xdead, lsl #32
; CHECK-NEXT: movk x16, #0xbeef, lsl #16
; CHECK-NEXT: movk x16, #0xcaff
; CHECK-NEXT: blr x16
; CHECK: ret
  %resolveCall2 = inttoptr i64 244837814094590 to i8*
  %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 20, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
  %resolveCall3 = inttoptr i64 244837814094591 to i8*
  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 20, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
  ret i64 %result
}

; Caller frame metadata with stackmaps. This should not be optimized
; as a leaf function.
;
; CHECK-LABEL: caller_meta_leaf
; CHECK: mov x29, sp
; CHECK-NEXT: sub sp, sp, #32
; CHECK: Ltmp
; CHECK: mov sp, x29
; CHECK: ret
define void @caller_meta_leaf() {
entry:
  %metadata = alloca i64, i32 3, align 8
  store i64 11, i64* %metadata
  store i64 12, i64* %metadata
  store i64 13, i64* %metadata
  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
  ret void
}

; Test patchpoints reusing the same TargetConstant.
; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
; There is no way to verify this, since it depends on memory allocation.
; But I think it's useful to include as a working example.
define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64 %tmp79) {
entry:
  %tmp80 = add i64 %tmp79, -16
  %tmp81 = inttoptr i64 %tmp80 to i64*
  %tmp82 = load i64, i64* %tmp81, align 8
  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
  %tmp83 = load i64, i64* %tmp33, align 8
  %tmp84 = add i64 %tmp83, -24
  %tmp85 = inttoptr i64 %tmp84 to i64*
  %tmp86 = load i64, i64* %tmp85, align 8
  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
  ret i64 10
}

; Test small patchpoints that don't emit calls.
define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: small_patchpoint_codegen:
; CHECK: Ltmp
; CHECK: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: ldp
; CHECK-NEXT: ret
  %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
  ret void
}

declare void @llvm.experimental.stackmap(i64, i32, ...)
declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)