Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-10-31 08:16:47 +00:00)
make the 'fp return in ST(0)' optimization smart enough to look through
token factor nodes. This allows us to compile testcases like
CodeGen/X86/fp-stack-retcopy.ll into:

_carg:
	subl	$12, %esp
	call	L_foo$stub
	fstpl	(%esp)
	fldl	(%esp)
	addl	$12, %esp
	ret

instead of:

_carg:
	subl	$28, %esp
	call	L_foo$stub
	fstpl	16(%esp)
	movsd	16(%esp), %xmm0
	movsd	%xmm0, 8(%esp)
	fldl	8(%esp)
	addl	$28, %esp
	ret

Still not optimal, but much better, and this is a trivial patch. Fixing
the rest requires invasive surgery that is not llvm 2.2 material.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@46054 91177308-0d34-0410-b5e6-96231b3b80d8
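For readers unfamiliar with the SelectionDAG term: a token factor (ISD::TokenFactor) is a node that merges several independent chains into one, so the chain feeding a copy of the return value may not point directly at the call that produced ST(0). The gist of "looking through" such nodes is a walk like the following minimal sketch, written against the modern SelectionDAG API with a hypothetical helper name (illustrative only, not the actual r46054 patch):

// Hypothetical helper chainReachesCall (not from the patch): walk a
// chain value, looking through TokenFactor nodes, to decide whether
// any of the merged chains ultimately comes from the given call node.
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

static bool chainReachesCall(SDValue Chain, SDNode *Call) {
  if (Chain.getNode() == Call)
    return true;
  // A TokenFactor merges several independent chains; instead of giving
  // up when we see one, recurse into each of its chain operands.
  if (Chain.getOpcode() == ISD::TokenFactor)
    for (const SDValue &Op : Chain->op_values())
      if (chainReachesCall(Op, Call))
        return true;
  return false;
}

With a traversal of this shape, the ST(0) return-value optimization can recognize the call through intervening chain merges and keep the result on the x87 stack, rather than spilling it to memory and reloading through an XMM register as in the "instead of" sequence above.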
CodeGen/X86/fp-stack-retcopy.ll (13 lines, 352 B, LLVM)
; This should not copy the result of foo into an xmm register.
; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah -mtriple=i686-apple-darwin9 | not grep xmm
; rdar://5689903

declare double @foo()

define double @carg({ double, double }* byval  %z) nounwind  {
entry:
	%tmp5 = tail call double @foo() nounwind		; <double> [#uses=1]
	ret double %tmp5
}