8383b539ff
This commit adds the infrastructure for performing bottom-up SLP vectorization (and other optimizations) on parallel computations. The infrastructure has three potential users:

1. The loop vectorizer needs to be able to vectorize AOS data structures such as (sum += A[i] + A[i+1]).
2. The BB-vectorizer needs this infrastructure for bottom-up SLP vectorization, because bottom-up vectorization is faster to compute.
3. A loop-roller needs to be able to analyze consecutive chains and roll them into a loop, in order to reduce code size. A loop roller does not need to create vector instructions, and this infrastructure separates the chain analysis from the vectorization.

This patch also includes a simple (100 LOC) bottom-up SLP vectorizer that uses the infrastructure and can vectorize this code:

  void SAXPY(int *x, int *y, int a, int i) {
    x[i]   = a * x[i]   + y[i];
    x[i+1] = a * x[i+1] + y[i+1];
    x[i+2] = a * x[i+2] + y[i+2];
    x[i+3] = a * x[i+3] + y[i+3];
  }

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@179117 91177308-0d34-0410-b5e6-96231b3b80d8
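For orientation (not part of the commit), here is a minimal C sketch, written with GCC/Clang vector extensions, of the 4-wide form that SLP-vectorizing SAXPY conceptually produces; the typedef and the name SAXPY_vec are illustrative only, not compiler output:

  /* Illustrative sketch only: the four scalar statements collapse
     into one 4-wide vector computation. */
  typedef int v4si __attribute__((vector_size(16)));   /* 4 x i32 */

  void SAXPY_vec(int *x, int *y, int a, int i) {
    v4si xv, yv, av = {a, a, a, a};            /* broadcast of 'a'        */
    __builtin_memcpy(&xv, &x[i], sizeof xv);   /* load  4 x i32 from x[i] */
    __builtin_memcpy(&yv, &y[i], sizeof yv);   /* load  4 x i32 from y[i] */
    xv = av * xv + yv;                         /* vector mul + add        */
    __builtin_memcpy(&x[i], &xv, sizeof xv);   /* store 4 x i32 to x[i]   */
  }

The test below exercises the same pattern on plain additions: four consecutive A[k] updates are expected to collapse into <4 x i32> operations.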
48 lines
1.4 KiB
LLVM
; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.7.0"
;int foo (int *A, int n) {
; A[0] += n * 5 + 7;
; A[1] += n * 5 + 8;
; A[2] += n * 5 + 9;
; A[3] += n * 5 + 10;
; A[4] += n * 5 + 11;
;}
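;
; Five consecutive updates of A[0..4]: the loads and stores of A[k] are
; adjacent in memory, so four of them form a group the SLP vectorizer can
; merge, while the scalar n*5-based addends must first be gathered into a
; vector (hence the insertelement check below).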
;CHECK: @foo
;CHECK: insertelement <4 x i32>
;CHECK: load <4 x i32>
;CHECK: add <4 x i32>
;CHECK: store <4 x i32>
;CHECK: ret
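;
; Note: the checks above only require a single <4 x i32> load/add/store
; group; the leftover fifth update (A[4]) is not constrained here and
; presumably stays scalar.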
define i32 @foo(i32* nocapture %A, i32 %n) nounwind ssp uwtable {
%1 = mul nsw i32 %n, 5
%2 = add nsw i32 %1, 7
%3 = load i32* %A, align 4
%4 = add nsw i32 %2, %3
store i32 %4, i32* %A, align 4
%5 = add nsw i32 %1, 8
%6 = getelementptr inbounds i32* %A, i64 1
%7 = load i32* %6, align 4
%8 = add nsw i32 %5, %7
store i32 %8, i32* %6, align 4
%9 = add nsw i32 %1, 9
%10 = getelementptr inbounds i32* %A, i64 2
%11 = load i32* %10, align 4
%12 = add nsw i32 %9, %11
store i32 %12, i32* %10, align 4
%13 = add nsw i32 %1, 10
%14 = getelementptr inbounds i32* %A, i64 3
%15 = load i32* %14, align 4
%16 = add nsw i32 %13, %15
store i32 %16, i32* %14, align 4
%17 = add nsw i32 %1, 11
%18 = getelementptr inbounds i32* %A, i64 4
%19 = load i32* %18, align 4
%20 = add nsw i32 %17, %19
store i32 %20, i32* %18, align 4
ret i32 undef
}