Mirror of https://github.com/c64scene-ar/llvm-6502.git
commit 0328ca6cd7
Memcpy, and other memory intrinsics, typically try to use LDM/STM if the source and target addresses are 4-byte aligned. In CodeGenPrepare, look for calls to memory intrinsics and, if the object is on the stack, 4-byte align it if it is large enough that we expect memcpy would want to use LDM/STM to copy it.

Differential Revision: http://reviews.llvm.org/D7908

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@232627 91177308-0d34-0410-b5e6-96231b3b80d8
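For illustration only (this sketch is not part of the commit; the names @before, @after and %p are made up, and the actual size threshold and preferred alignment are decided by a target hook), the intended effect on an under-aligned stack object is roughly:

declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind

; Before CodeGenPrepare: the buffer only promises 1-byte alignment, so the
; memset expansion cannot assume it can use word-sized stores.
define void @before() {
entry:
  %buf = alloca [26 x i8], align 1
  %p = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0
  call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 26, i32 1, i1 false)
  ret void
}

; After CodeGenPrepare (assumed outcome): the alloca's alignment is raised to 4,
; which lets later lowering use wider aligned stores. (The pass may also bump
; the intrinsic's own alignment operand; that detail is omitted here.)
define void @after() {
entry:
  %buf = alloca [26 x i8], align 4
  %p = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0
  call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 26, i32 1, i1 false)
  ret void
}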
; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 -pre-RA-sched=source -disable-post-ra | FileCheck %s
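
; t1: a 12-byte memset of a pointer argument, with the intrinsic claiming 8-byte
; alignment, should be expanded inline into three word stores.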
define void @t1(i8* nocapture %c) nounwind optsize {
entry:
; CHECK-LABEL: t1:
; CHECK: movs r1, #0
; CHECK: str r1, [r0]
; CHECK: str r1, [r0, #4]
; CHECK: str r1, [r0, #8]
  call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 12, i32 8, i1 false)
  ret void
}
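
; t2: a 26-byte memset of a stack buffer that is only 1-byte aligned at the IR
; level; the CHECK lines expect an inline expansion using NEON stores. A stack
; object like this is the kind of candidate the change above lets CodeGenPrepare
; realign.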
define void @t2() nounwind ssp {
entry:
; CHECK-LABEL: t2:
; CHECK: add.w r1, r0, #10
; CHECK: vmov.i32 {{q[0-9]+}}, #0x0
; CHECK: vst1.16 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
  %buf = alloca [26 x i8], align 1
  %0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0
  call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i32 1, i1 false)
  call void @something(i8* %0) nounwind
  ret void
}
declare void @something(i8*) nounwind
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind