; Mirror provenance: https://github.com/c64scene-ar/llvm-6502.git
; (synced 2024-11-16, commit 45ca7c6336)
; Original commit message: This eliminates a lot of constant pool entries
; for -O0 builds of code with many global variable accesses. This speeds up
; -O0 codegen of consumer-typeset by 2x because the constant island pass no
; longer has to look at thousands of constant pool entries.
; <rdar://problem/10629774>
; git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@147712 91177308-0d34-0410-b5e6-96231b3b80d8
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
; 60-byte initialized string; destination of the memset in @t1.
@message1 = global [60 x i8] c"The LLVM Compiler Infrastructure\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", align 1
; 60-byte zero-initialized buffer; source/destination of the
; memcpy/memmove tests in @t2-@t4. `common` linkage means it is
; accessed through L_temp$non_lazy_ptr in the checked codegen.
@temp = common global [60 x i8] zeroinitializer, align 1
; @t1: memset of a constant-size slice of @message1.
; Verifies that -O0 fast-isel materializes the global's address with a
; movw/movt pair (dynamic-no-pic, no constant-pool load) and that the i8
; fill value is zero-extended (uxtb) before the call to _memset.
define void @t1() nounwind ssp {
; ARM: t1
; ARM: movw r0, :lower16:_message1
; ARM: movt r0, :upper16:_message1
; ARM: add r0, r0, #5
; ARM: movw r1, #64
; ARM: movw r2, #10
; ARM: uxtb r1, r1
; ARM: bl _memset
; THUMB: t1
; THUMB: movw r0, :lower16:_message1
; THUMB: movt r0, :upper16:_message1
; THUMB: adds r0, #5
; THUMB: movs r1, #64
; THUMB: movt r1, #0
; THUMB: movs r2, #10
; THUMB: movt r2, #0
; THUMB: uxtb r1, r1
; THUMB: bl _memset
  call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @message1, i32 0, i32 5), i8 64, i32 10, i32 1, i1 false)
  ret void
}
; memset intrinsic (old-style signature: dest, value, len, align, isvolatile).
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
; @t2: overlapping-range memcpy within @temp, 17 bytes (too large to
; inline at -O0, so a call to _memcpy is emitted). Verifies the global's
; address is loaded once via its non-lazy pointer (movw/movt + ldr) and
; reused for both arguments.
define void @t2() nounwind ssp {
; ARM: t2
; ARM: movw r0, :lower16:L_temp$non_lazy_ptr
; ARM: movt r0, :upper16:L_temp$non_lazy_ptr
; ARM: ldr r0, [r0]
; ARM: add r1, r0, #4
; ARM: add r0, r0, #16
; ARM: movw r2, #17
; ARM: str r0, [sp] @ 4-byte Spill
; ARM: mov r0, r1
; ARM: ldr r1, [sp] @ 4-byte Reload
; ARM: bl _memcpy
; THUMB: t2
; THUMB: movw r0, :lower16:L_temp$non_lazy_ptr
; THUMB: movt r0, :upper16:L_temp$non_lazy_ptr
; THUMB: ldr r0, [r0]
; THUMB: adds r1, r0, #4
; THUMB: adds r0, #16
; THUMB: movs r2, #17
; THUMB: movt r2, #0
; THUMB: mov r0, r1
; THUMB: bl _memcpy
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 16), i32 17, i32 1, i1 false)
  ret void
}
; memcpy intrinsic (old-style signature: dest, src, len, align, isvolatile).
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
; @t3: memmove within @temp, 10 bytes. memmove is never inlined (ranges
; may overlap), so a call to _memmove is emitted; the global address is
; again materialized via its non-lazy pointer without a constant pool.
define void @t3() nounwind ssp {
; ARM: t3
; ARM: movw r0, :lower16:L_temp$non_lazy_ptr
; ARM: movt r0, :upper16:L_temp$non_lazy_ptr
; ARM: ldr r0, [r0]
; ARM: add r1, r0, #4
; ARM: add r0, r0, #16
; ARM: movw r2, #10
; ARM: mov r0, r1
; ARM: bl _memmove
; THUMB: t3
; THUMB: movw r0, :lower16:L_temp$non_lazy_ptr
; THUMB: movt r0, :upper16:L_temp$non_lazy_ptr
; THUMB: ldr r0, [r0]
; THUMB: adds r1, r0, #4
; THUMB: adds r0, #16
; THUMB: movs r2, #10
; THUMB: movt r2, #0
; THUMB: mov r0, r1
; THUMB: bl _memmove
  call void @llvm.memmove.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 16), i32 10, i32 1, i1 false)
  ret void
}
; @t4: same memcpy as @t3's memmove but only 10 bytes — small enough
; that fast-isel inlines it as ldr/str (+ ldrh/strh for the 2-byte tail)
; instead of calling _memcpy, then returns with bx lr.
define void @t4() nounwind ssp {
; ARM: t4
; ARM: movw r0, :lower16:L_temp$non_lazy_ptr
; ARM: movt r0, :upper16:L_temp$non_lazy_ptr
; ARM: ldr r0, [r0]
; ARM: movw r1, :lower16:L_temp$non_lazy_ptr
; ARM: movt r1, :upper16:L_temp$non_lazy_ptr
; ARM: ldr r1, [r1]
; ARM: ldr r2, [r1, #16]
; ARM: str r2, [r0, #4]
; ARM: ldr r2, [r1, #20]
; ARM: str r2, [r0, #8]
; ARM: ldrh r1, [r1, #24]
; ARM: strh r1, [r0, #12]
; ARM: bx lr
; THUMB: movw r0, :lower16:L_temp$non_lazy_ptr
; THUMB: movt r0, :upper16:L_temp$non_lazy_ptr
; THUMB: ldr r0, [r0]
; THUMB: movw r1, :lower16:L_temp$non_lazy_ptr
; THUMB: movt r1, :upper16:L_temp$non_lazy_ptr
; THUMB: ldr r1, [r1]
; THUMB: ldr r2, [r1, #16]
; THUMB: str r2, [r0, #4]
; THUMB: ldr r2, [r1, #20]
; THUMB: str r2, [r0, #8]
; THUMB: ldrh r1, [r1, #24]
; THUMB: strh r1, [r0, #12]
; THUMB: bx lr
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 4), i8* getelementptr inbounds ([60 x i8]* @temp, i32 0, i32 16), i32 10, i32 1, i1 false)
  ret void
}
; memmove intrinsic (old-style signature: dest, src, len, align, isvolatile).
declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind