	Making use of VFP / NEON floating point multiply-accumulate / subtraction is
difficult on current ARM implementations for a few reasons.
1. Even though a single vmla has latency that is one cycle shorter than a pair
   of vmul + vadd, a RAW hazard during the first few cycles (4? on Cortex-A8)
   can cause an additional pipeline stall, so it's frequently better to simply
   codegen vmul + vadd.
2. A vmla followed by a vmul, vmadd, or vsub causes the second fp instruction
   to stall for 4 cycles. We need to schedule them apart.
3. A vmla followed by a vmla is a special case. Obviously, issuing back-to-back
   RAW vmla + vmla is very bad. But this isn't ideal either:
     vmul
     vadd
     vmla
   Instead, we want to expand the second vmla:
     vmla
     vmul
     vadd
   Even with the 4 cycle vmul stall, the second sequence is still 2 cycles
   faster. (A small IR sketch of this pattern follows the list.)
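As a rough illustration, here is a minimal IR sketch of the kind of source
pattern that leads to case 3. The function name @chained_mac is hypothetical
and not part of this patch; the last test in the file below exercises the same
shape:

  ; Hypothetical example: %mac feeds the second fadd, so naively matching both
  ; MACs would emit a vmla followed by a dependent vmla (the bad back-to-back
  ; RAW case). Expanding the second MAC into vmul + vadd, with the first vmla
  ; issued up front, gives the cheaper schedule described above.
  define float @chained_mac(float %acc, float %a, float %b, float %c, float %d) {
  entry:
    %mul1 = fmul float %a, %b
    %mac  = fadd float %acc, %mul1   ; candidate for vmla
    %mul2 = fmul float %c, %d        ; stays a plain vmul
    %res  = fadd float %mac, %mul2   ; ... followed by a vadd
    ret float %res
  }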
Up to now, isel has simply avoided codegen'ing fp vmla / vmls. This works well
enough, but it isn't the optimal solution. This patch attempts to make it
possible to use vmla / vmls in cases where it is profitable.
A. Add missing isel predicates which cause vmla to be codegen'ed.
B. Make sure the fmul in (fadd (fmul)) has a single use. We don't want to
   compute both a fmul and a fmla (see the sketch after this list).
C. Add additional isel checks for vmla, avoiding cases where vmla is feeding
   into fp instructions (except for the #3 exceptional case).
D. Add an ARM hazard recognizer to model the vmla / vmls hazards.
E. Add a special pre-regalloc expansion of vmla / vmls for when it's likely the
   vmla / vmls will trigger one of the special hazards.
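For point B, a minimal hypothetical snippet (again not part of the test file
below) where folding the fmul into a vmla would be a loss, because the product
is also needed on its own:

  ; %prod has two uses. If isel folded it into a vmla for %sum, the multiply
  ; would still have to be computed separately for the store, costing a vmul
  ; plus a vmla instead of a vmul plus a vadd.
  define float @mul_two_uses(float %acc, float %a, float %b, float* %p) {
  entry:
    %prod = fmul float %a, %b
    %sum  = fadd float %acc, %prod   ; would-be vmla
    store float %prod, float* %p     ; second use of %prod
    ret float %sum
  }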
Enable these fp vmlx codegen changes for Cortex-A9.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129775 91177308-0d34-0410-b5e6-96231b3b80d8

; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=A9
; RUN: llc < %s -mtriple=arm-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s -check-prefix=HARD

define float @t1(float %acc, float %a, float %b) {
entry:
; VFP2: t1:
; VFP2: vmla.f32

; NEON: t1:
; NEON: vmla.f32

; A8: t1:
; A8: vmul.f32
; A8: vadd.f32
  %0 = fmul float %a, %b
  %1 = fadd float %acc, %0
  ret float %1
}

define double @t2(double %acc, double %a, double %b) {
entry:
; VFP2: t2:
; VFP2: vmla.f64

; NEON: t2:
; NEON: vmla.f64

; A8: t2:
; A8: vmul.f64
; A8: vadd.f64
  %0 = fmul double %a, %b
  %1 = fadd double %acc, %0
  ret double %1
}

define float @t3(float %acc, float %a, float %b) {
entry:
; VFP2: t3:
; VFP2: vmla.f32

; NEON: t3:
; NEON: vmla.f32

; A8: t3:
; A8: vmul.f32
; A8: vadd.f32
  %0 = fmul float %a, %b
  %1 = fadd float %0, %acc
  ret float %1
}

; It's possible to make use of fp vmla / vmls on Cortex-A9.
; rdar://8659675
define void @t4(float %acc1, float %a, float %b, float %acc2, float %c, float* %P1, float* %P2) {
entry:
; A8: t4:
; A8: vmul.f32
; A8: vmul.f32
; A8: vadd.f32
; A8: vadd.f32

; Two vmla with no RAW hazard
; A9: t4:
; A9: vmla.f32
; A9: vmla.f32

; HARD: t4:
; HARD: vmla.f32 s0, s1, s2
; HARD: vmla.f32 s3, s1, s4
  %0 = fmul float %a, %b
  %1 = fadd float %acc1, %0
  %2 = fmul float %a, %c
  %3 = fadd float %acc2, %2
  store float %1, float* %P1
  store float %3, float* %P2
  ret void
}

define float @t5(float %a, float %b, float %c, float %d, float %e) {
entry:
; A8: t5:
; A8: vmul.f32
; A8: vmul.f32
; A8: vadd.f32
; A8: vadd.f32

; A9: t5:
; A9: vmla.f32
; A9: vmul.f32
; A9: vadd.f32

; HARD: t5:
; HARD: vmla.f32 s4, s0, s1
; HARD: vmul.f32 s0, s2, s3
; HARD: vadd.f32 s0, s4, s0
  %0 = fmul float %a, %b
  %1 = fadd float %e, %0
  %2 = fmul float %c, %d
  %3 = fadd float %1, %2
  ret float %3
}