Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-28 19:31:58 +00:00
Commit 1ef70ff39b

Now that `Metadata` is typeless, reflect that in the assembly. These are
the matching assembly changes for the metadata/value split in r223802.

  - Only use the `metadata` type when referencing metadata from a call
    intrinsic -- i.e., only when it's used as a `Value`.

  - Stop pretending that `ValueAsMetadata` is wrapped in an `MDNode` when
    referencing it from call intrinsics.

So, assembly like this:

    define @foo(i32 %v) {
      call void @llvm.foo(metadata !{i32 %v}, metadata !0)
      call void @llvm.foo(metadata !{i32 7}, metadata !0)
      call void @llvm.foo(metadata !1, metadata !0)
      call void @llvm.foo(metadata !3, metadata !0)
      call void @llvm.foo(metadata !{metadata !3}, metadata !0)
      ret void, !bar !2
    }

    !0 = metadata !{metadata !2}
    !1 = metadata !{i32* @global}
    !2 = metadata !{metadata !3}
    !3 = metadata !{}

turns into this:

    define @foo(i32 %v) {
      call void @llvm.foo(metadata i32 %v, metadata !0)
      call void @llvm.foo(metadata i32 7, metadata !0)
      call void @llvm.foo(metadata i32* @global, metadata !0)
      call void @llvm.foo(metadata !3, metadata !0)
      call void @llvm.foo(metadata !{!3}, metadata !0)
      ret void, !bar !2
    }

    !0 = !{!2}
    !1 = !{i32* @global}
    !2 = !{!3}
    !3 = !{}

I wrote an upgrade script that handled almost all of the tests in llvm and
many of the tests in cfe (even handling many `CHECK` lines). I've attached
it (or will attach it in a moment if you're speedy) to PR21532 to help
everyone update their out-of-tree testcases.

This is part of PR21532.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224257 91177308-0d34-0410-b5e6-96231b3b80d8
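For reference, the only metadata in the test below is the !srcloc node on the
first inline-asm call; under the old syntax it would have been written
`!0 = metadata !{i32 -2146895770}`, and after this change it is written
`!0 = !{i32 -2146895770}`.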
109 lines
3.5 KiB
LLVM
; RUN: llc -mtriple=powerpc64-bgq-linux -mcpu=a2 < %s | FileCheck %s
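; This test exercises PPC64 inline asm that wraps the "sc" instruction with
; explicit r0/r3-r5 register constraints; the checks below verify that the
; i64 operands are handled with 64-bit loads and that the asm blocks and the
; surrounding register setup survive into the generated code.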
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-bgq-linux"

%struct.BG_CoordinateMapping_t = type { [4 x i8] }

; Function Attrs: alwaysinline inlinehint nounwind
define zeroext i32 @Kernel_RanksToCoords(i64 %mapsize, %struct.BG_CoordinateMapping_t* %map, i64* %numentries) #0 {
entry:
  %mapsize.addr = alloca i64, align 8
  %map.addr = alloca %struct.BG_CoordinateMapping_t*, align 8
  %numentries.addr = alloca i64*, align 8
  %r0 = alloca i64, align 8
  %r3 = alloca i64, align 8
  %r4 = alloca i64, align 8
  %r5 = alloca i64, align 8
  %tmp = alloca i64, align 8
  store i64 %mapsize, i64* %mapsize.addr, align 8
  store %struct.BG_CoordinateMapping_t* %map, %struct.BG_CoordinateMapping_t** %map.addr, align 8
  store i64* %numentries, i64** %numentries.addr, align 8
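  ; Stage the operands for the "sc" inline asm in stack slots: %r0 holds the
  ; constant 1055 (presumably the syscall/request number) and %r3-%r5 hold the
  ; mapsize, map pointer, and numentries pointer; all four are reloaded below
  ; and bound to physical registers via the {r0},{r3},{r4},{r5} constraints.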
  store i64 1055, i64* %r0, align 8
  %0 = load i64* %mapsize.addr, align 8
  store i64 %0, i64* %r3, align 8
  %1 = load %struct.BG_CoordinateMapping_t** %map.addr, align 8
  %2 = ptrtoint %struct.BG_CoordinateMapping_t* %1 to i64
  store i64 %2, i64* %r4, align 8
  %3 = load i64** %numentries.addr, align 8
  %4 = ptrtoint i64* %3 to i64
  store i64 %4, i64* %r5, align 8
  %5 = load i64* %r0, align 8
  %6 = load i64* %r3, align 8
  %7 = load i64* %r4, align 8
  %8 = load i64* %r5, align 8
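  ; The constraint string binds the four results to r0/r3/r4/r5, feeds the four
  ; inputs in through the same registers, and marks r6-r12, cr0, and memory as
  ; clobbered, so %5-%8 must be materialized in r0, r3, r4, and r5 right before
  ; the "sc" instruction.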
  %9 = call { i64, i64, i64, i64 } asm sideeffect "sc", "={r0},={r3},={r4},={r5},{r0},{r3},{r4},{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 %5, i64 %6, i64 %7, i64 %8) #1, !srcloc !0

; CHECK-LABEL: @Kernel_RanksToCoords

; These need to be 64-bit loads, not 32-bit loads (not lwz).
; CHECK-NOT: lwz

; CHECK: #APP
; CHECK: sc
; CHECK: #NO_APP

; CHECK: blr

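  ; Unpack the four asm results back into the staging slots; only %r3, the
  ; primary return value, is actually read again below.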
  %asmresult = extractvalue { i64, i64, i64, i64 } %9, 0
  %asmresult1 = extractvalue { i64, i64, i64, i64 } %9, 1
  %asmresult2 = extractvalue { i64, i64, i64, i64 } %9, 2
  %asmresult3 = extractvalue { i64, i64, i64, i64 } %9, 3
  store i64 %asmresult, i64* %r0, align 8
  store i64 %asmresult1, i64* %r3, align 8
  store i64 %asmresult2, i64* %r4, align 8
  store i64 %asmresult3, i64* %r5, align 8
  %10 = load i64* %r3, align 8
  store i64 %10, i64* %tmp
  %11 = load i64* %tmp
  %conv = trunc i64 %11 to i32
  ret i32 %conv
}

declare void @mtrace()

define signext i32 @main(i32 signext %argc, i8** %argv) {
entry:
  %argc.addr = alloca i32, align 4
  store i32 %argc, i32* %argc.addr, align 4
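  ; This "sc" returns in r0/r3 and clobbers r4-r12, so the incoming %argc
  ; (which arrives in r3 under the PPC64 ELF ABI) has to be copied out of r3
  ; and spilled before the call; the mr/stw checks below verify that.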
  %0 = call { i64, i64 } asm sideeffect "sc", "={r0},={r3},{r0},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{cr0},~{memory}"(i64 1076)
  %asmresult1.i = extractvalue { i64, i64 } %0, 1
  %conv.i = trunc i64 %asmresult1.i to i32
  %cmp = icmp eq i32 %conv.i, 0
  br i1 %cmp, label %if.then, label %if.end

; CHECK-LABEL: @main

; CHECK-DAG: mr [[REG:[0-9]+]], 3
; CHECK-DAG: li 0, 1076
; CHECK: stw [[REG]],

; CHECK: #APP
; CHECK: sc
; CHECK: #NO_APP

; CHECK: cmpwi {{[0-9]+}}, [[REG]], 1

; CHECK: blr

if.then: ; preds = %entry
  call void @mtrace()
  %.pre = load i32* %argc.addr, align 4
  br label %if.end

if.end: ; preds = %if.then, %entry
  %1 = phi i32 [ %.pre, %if.then ], [ %argc, %entry ]
  %cmp1 = icmp slt i32 %1, 2
  br i1 %cmp1, label %usage, label %if.end40

usage:
  ret i32 8

if.end40:
  ret i32 0
}

attributes #0 = { alwaysinline inlinehint nounwind }
attributes #1 = { nounwind }

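; !srcloc carries the (integer-encoded) source location of the inline asm call
; so the backend can point diagnostics at the original statement.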
!0 = !{i32 -2146895770}