move CodeGen/X86/overlap-add.ll here.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@36799 91177308-0d34-0410-b5e6-96231b3b80d8
Chris Lattner 2007-05-05 22:10:24 +00:00
parent 60ef91a3ce
commit 0258011bb9

@@ -1004,6 +1004,33 @@ _foo:
movl %edi, %eax
ret
Another example is:
;; X's live range extends beyond the add, so the register allocator
;; cannot coalesce the add's result with it.  Because of this, a copy
;; needs to be emitted before the add to save the register value before
;; it is clobbered.  However, this copy is not needed if the register
;; allocator turns the add into an LEA.  The same applies to shifts.
; Check that the add gets turned into an LEA.
; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
; RUN: not grep {mov E.X, E.X}
%G = external global int

int %test1(int %X, int %Y) {
        %Z = add int %X, %Y
        volatile store int %Y, int* %G
        volatile store int %Z, int* %G
        ret int %X
}

int %test2(int %X) {
        %Z = add int %X, 1              ;; inc
        volatile store int %Z, int* %G
        ret int %X
}
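
For reference, a sketch (not from the test) of the two sequences the RUN
line distinguishes for test1, assuming X arrives in EAX and Y in ECX:

        ;; with the two-address ADD, a copy is needed because X (EAX) is
        ;; returned and must outlive the add:
        mov EDX, EAX
        add EDX, ECX

        ;; with LEA, a three-address form, no copy is needed, so the
        ;; "not grep {mov E.X, E.X}" check passes:
        lea EDX, [EAX + ECX]
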
//===---------------------------------------------------------------------===//
We use push/pop of stack space around calls in situations where we don't have to.