mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-01-02 07:32:52 +00:00
8383b539ff
This commit adds the infrastructure for performing bottom-up SLP vectorization (and other optimizations) on parallel computations. The infrastructure has three potential users: 1. The loop vectorizer needs to be able to vectorize AOS data structures such as (sum += A[i] + A[i+1]). 2. The BB-vectorizer needs this infrastructure for bottom-up SLP vectorization, because bottom-up vectorization is faster to compute. 3. A loop-roller needs to be able to analyze consecutive chains and roll them into a loop, in order to reduce code size. A loop roller does not need to create vector instructions, and this infrastructure separates the chain analysis from the vectorization. This patch also includes a simple (100 LOC) bottom up SLP vectorizer that uses the infrastructure, and can vectorize this code: void SAXPY(int *x, int *y, int a, int i) { x[i] = a * x[i] + y[i]; x[i+1] = a * x[i+1] + y[i+1]; x[i+2] = a * x[i+2] + y[i+2]; x[i+3] = a * x[i+3] + y[i+3]; } git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@179117 91177308-0d34-0410-b5e6-96231b3b80d8
26 lines
1002 B
LLVM
; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

; Simple 3-pair chain with loads and stores
; The SLP vectorizer should merge the two scalar chains below into a single
; <2 x double> multiply/store sequence, which FileCheck verifies here.
; CHECK: test1
; CHECK: store <2 x double>
; CHECK: ret
; Two independent scalar chains: load a[0]/b[0], multiply, store to c[0];
; load a[1]/b[1], multiply, store to c[1].  These isomorphic chains are the
; input the bottom-up SLP vectorizer is expected to fuse into vector ops.
define void @test1(double* %a, double* %b, double* %c) nounwind uwtable readonly {
entry:
  %i0 = load double* %a, align 8
  %i1 = load double* %b, align 8
  %mul = fmul double %i0, %i1
  %arrayidx3 = getelementptr inbounds double* %a, i64 1
  %i3 = load double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double* %b, i64 1
  %i4 = load double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  store double %mul, double* %c, align 8
  %arrayidx5 = getelementptr inbounds double* %c, i64 1
  store double %mul5, double* %arrayidx5, align 8
  ret void
}