This patch recognizes the pattern (+ (+ v0, v1), (+ v2, v3)), reorders the operands for bundling into a vector of loads,

and vectorizes it.
 
 Test case :
 
       float hadd(float* a) {
           return (a[0] + a[1]) + (a[2] + a[3]);
        }
 
 
 AArch64 assembly before patch :
 
        ldp	s0, s1, [x0]
 	ldp	s2, s3, [x0, #8]
 	fadd	s0, s0, s1
 	fadd	s1, s2, s3
 	fadd	s0, s0, s1
 	ret
 
 AArch64 assembly after patch :
 
        ldp	d0, d1, [x0]
 	fadd	v0.2s, v0.2s, v1.2s
 	faddp	s0, v0.2s
 	ret

Reviewed Link : http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20141208/248531.html



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224119 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Suyog Sarda
2014-12-12 12:53:44 +00:00
parent c15d82e259
commit 1dea0dc279
2 changed files with 51 additions and 2 deletions

View File

@@ -0,0 +1,27 @@
; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu -mcpu=cortex-a57 | FileCheck %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnu"
; float hadd (float *a) {
; return (a[0] + a[1]) + (a[2] + a[3]);
; }
; CHECK-LABEL: @hadd
; CHECK: load <2 x float>*
; CHECK: fadd <2 x float>
; CHECK: extractelement <2 x float>
; float hadd(float *a): returns (a[0] + a[1]) + (a[2] + a[3]).
; Deliberately written as two independent scalar adds of adjacent loads so the
; SLP vectorizer can bundle the four consecutive loads into <2 x float>
; operations plus a final horizontal reduction (see the CHECK lines above).
; NOTE(review): uses pre-LLVM-3.7 typed-pointer syntax ("load float* %a",
; "getelementptr inbounds float* %a") — correct for the revision this test
; targets; would need "load float, float*" on modern LLVM.
define float @hadd(float* nocapture readonly %a) {
entry:
%0 = load float* %a, align 4                          ; a[0]
%arrayidx1 = getelementptr inbounds float* %a, i64 1
%1 = load float* %arrayidx1, align 4                  ; a[1]
%add = fadd float %0, %1                              ; a[0] + a[1]
%arrayidx2 = getelementptr inbounds float* %a, i64 2
%2 = load float* %arrayidx2, align 4                  ; a[2]
%arrayidx3 = getelementptr inbounds float* %a, i64 3
%3 = load float* %arrayidx3, align 4                  ; a[3]
%add4 = fadd float %2, %3                             ; a[2] + a[3]
%add5 = fadd float %add, %add4                        ; horizontal sum of all four
ret float %add5
}