1
0
mirror of https://github.com/c64scene-ar/llvm-6502.git synced 2024-12-26 05:32:25 +00:00
llvm-6502/test/CodeGen/PowerPC/retaddr.ll
Chris Lattner d96428597b Fix a significant code quality regression I introduced on PPC64 quite
a while ago.  We now produce:

_foo:
	mflr r0
	std r0, 16(r1)
	ld r2, 16(r1)
	std r2, 0(r3)
	ld r0, 16(r1)
	mtlr r0
	blr 

instead of:

_foo:
	mflr r0
	std r0, 16(r1)
	lis r0, 0
	ori r0, r0, 16
	ldx r2, r1, r0
	std r2, 0(r3)
	ld r0, 16(r1)
	mtlr r0
	blr 

for:

void foo(void **X) {
  *X = __builtin_return_address(0);
}

on ppc64.



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@44701 91177308-0d34-0410-b5e6-96231b3b80d8
2007-12-08 07:04:58 +00:00

16 lines
395 B
LLVM

; RUN: llvm-as < %s | llc -march=ppc32 | grep mflr
; RUN: llvm-as < %s | llc -march=ppc32 | grep lwz
; RUN: llvm-as < %s | llc -march=ppc64 | grep {ld r., 16(r1)}
target triple = "powerpc-apple-darwin8"
; void foo(void **X) { *X = __builtin_return_address(0); }
; Stores the current frame's return address through the pointer argument.
define void @foo(i8** %X) {
entry:
  ; Return address at depth 0 (this frame's caller).
  %ra = tail call i8* @llvm.returnaddress(i32 0)
  store i8* %ra, i8** %X, align 4
  ret void
}
declare i8* @llvm.returnaddress(i32)