mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-02-07 14:33:15 +00:00
523f800e90
For functions where esi is used as base pointer, we would previously fall back from lowering memcpy with "rep movs" because that clobbers esi. With this patch, we just store esi in another physical register, and restore it afterwards. This adds a little bit of register pressure, but the more efficient memcpy should be worth it. Differential Revision: http://llvm-reviews.chandlerc.com/D2968 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@204174 91177308-0d34-0410-b5e6-96231b3b80d8
20 lines
647 B
LLVM
20 lines
647 B
LLVM
; RUN: llc < %s -force-align-stack -mtriple i386-apple-darwin -mcpu=i486 | FileCheck %s

; 88-byte aggregate; passed byval below, so the call site must materialize a copy.
%struct.foo = type { [88 x i8] }

declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind

; PR19012
; Don't clobber %esi if we have inline asm that clobbers %esp.
; With -force-align-stack the function realigns its stack and %esi serves as
; the base pointer.  Lowering the byval copy as "rep movs" clobbers %esi, so
; the backend is expected to stash %esi in another register (%edx below)
; around the copy and restore it afterwards instead of giving up on rep movs.
define void @test1(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind {
  call void @bar(i8* %z, %struct.foo* align 4 byval %x)
  call void asm sideeffect inteldialect "xor esp, esp", "=*m,~{flags},~{esp},~{esp},~{dirflag},~{fpsr},~{flags}"(i8* %z)
  ret void

; CHECK-LABEL: test1:
; CHECK: movl %esp, %esi
; CHECK: movl %esi, %edx
; CHECK: rep;movsl
; CHECK: movl %edx, %esi
}
|