mirror of https://github.com/c64scene-ar/llvm-6502.git
dd469afe15
Summary:
This implements the initial version as proposed earlier this year (http://lists.cs.uiuc.edu/pipermail/llvmdev/2015-January/080462.html). Since then, Loop Access Analysis has been split out from the Loop Vectorizer and made into a separate analysis pass. Loop Distribution becomes the second user of this analysis.

The pass is off by default and can be enabled with -enable-loop-distribution. There is currently no notion of profitability: if a loop has dependence cycles, the pass will try to split them off from the other memory operations into a separate loop.

I decided to remove the control-dependence calculation from this first version. This and the issues with the PDT are actively being discussed, so it probably makes sense to treat it separately. Right now I just mark all terminator instructions as required, which keeps the CFGs identical for each distributed loop. This seems to be working pretty well for 456.hmmer, where even though the distributed loop initially contains an empty if-then block, it gets completely removed.

The pass keeps DominatorTree and LoopInfo updated. I've tested this with -loop-distribute-verify on the test-suite, where we distribute ~90 loops. LoopSimplify form is violated in some cases and I have a FIXME covering this.

Reviewers: hfinkel, nadav, aschwaighofer

Reviewed By: aschwaighofer

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D8831

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@237358 91177308-0d34-0410-b5e6-96231b3b80d8
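As a rough source-level sketch of what distribution does (not part of the commit; the array names and loop shape mirror the test added below, while the function names, parameter types, and n are placeholders), the statement carrying the dependence cycle is split into its own loop so the remaining, independent statement can later be vectorized:

  /* Hypothetical C illustration; names mirror the test file below. */
  void f_before(int *A, int *B, int *C, int *D, int *E, int n) {
    /* One loop: the store to A[i + 1] feeds the load of A[i] on the next
       iteration, so the whole loop carries a dependence cycle and cannot
       be vectorized as-is. */
    for (int i = 0; i < n; i++) {
      A[i + 1] = A[i] * B[i];
      C[i]     = D[i] * E[i];
    }
  }

  void f_after(int *A, int *B, int *C, int *D, int *E, int n) {
    /* Conceptual result of -enable-loop-distribution: the cyclic part is
       split off into its own loop ... */
    for (int i = 0; i < n; i++)
      A[i + 1] = A[i] * B[i];      /* stays scalar */
    /* ... leaving an independent loop that is vectorizable, provided the
       arrays do not overlap, which the runtime memchecks verify. */
    for (int i = 0; i < n; i++)
      C[i]     = D[i] * E[i];
  }

The second loop is the one the -loop-vectorize RUN line in the test checks for via the VECTORIZE prefix (mul <4 x i32>).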
113 lines
3.2 KiB
LLVM
; RUN: opt -basicaa -loop-distribute -verify-loop-info -verify-dom-info -S \
; RUN:   < %s | FileCheck %s

; RUN: opt -basicaa -loop-distribute -loop-vectorize -force-vector-width=4 \
; RUN:   -verify-loop-info -verify-dom-info -S < %s | \
; RUN:   FileCheck --check-prefix=VECTORIZE %s

; The memcheck version of basic.ll.  We should distribute and vectorize the
; second part of this loop with 5 memchecks (A+1 x {C, D, E} + C x {A, B})
;
;   for (i = 0; i < n; i++) {
;     A[i + 1] = A[i] * B[i];
; -------------------------------
;     C[i] = D[i] * E[i];
;   }

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"

@B = common global i32* null, align 8
@A = common global i32* null, align 8
@C = common global i32* null, align 8
@D = common global i32* null, align 8
@E = common global i32* null, align 8

define void @f() {
entry:
  %a = load i32*, i32** @A, align 8
  %b = load i32*, i32** @B, align 8
  %c = load i32*, i32** @C, align 8
  %d = load i32*, i32** @D, align 8
  %e = load i32*, i32** @E, align 8
  br label %for.body

; We have two compares for each array overlap check which is a total of 10
; compares.
;
; CHECK: for.body.ldist.memcheck:
; CHECK:     = icmp
; CHECK:     = icmp

; CHECK:     = icmp
; CHECK:     = icmp

; CHECK:     = icmp
; CHECK:     = icmp

; CHECK:     = icmp
; CHECK:     = icmp

; CHECK:     = icmp
; CHECK:     = icmp

; CHECK-NOT: = icmp
; CHECK: br i1 %memcheck.conflict, label %for.body.ph.ldist.nondist, label %for.body.ph.ldist1

; The non-distributed loop that the memchecks fall back on.

; CHECK: for.body.ph.ldist.nondist:
; CHECK: br label %for.body.ldist.nondist
; CHECK: for.body.ldist.nondist:
; CHECK: br i1 %exitcond.ldist.nondist, label %for.end, label %for.body.ldist.nondist

; Verify the two distributed loops.

; CHECK: for.body.ph.ldist1:
; CHECK: br label %for.body.ldist1
; CHECK: for.body.ldist1:
; CHECK: %mulA.ldist1 = mul i32 %loadB.ldist1, %loadA.ldist1
; CHECK: br i1 %exitcond.ldist1, label %for.body.ph, label %for.body.ldist1

; CHECK: for.body.ph:
; CHECK: br label %for.body
; CHECK: for.body:
; CHECK: %mulC = mul i32 %loadD, %loadE
; CHECK: for.end:


; VECTORIZE: mul <4 x i32>

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]

  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
  %loadE = load i32, i32* %arrayidxE, align 4

  %mulC = mul i32 %loadD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, 20
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}