diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 0faab7a3c0e..9a832f7a120 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -114,9 +114,9 @@ TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16),
 /// We don't unroll loops with a known constant trip count below this number.
 static const unsigned TinyTripCountUnrollThreshold = 128;
 
-/// When performing a runtime memory check, do not check more than this
-/// number of pointers. Notice that the check is quadratic!
-static const unsigned RuntimeMemoryCheckThreshold = 4;
+/// When performing memory disambiguation checks at runtime do not make more
+/// than this number of comparisons.
+static const unsigned RuntimeMemoryCheckThreshold = 8;
 
 /// We use a metadata with this name to indicate that a scalar loop was
 /// vectorized and that we don't need to re-vectorize it if we run into it
@@ -2679,6 +2679,9 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
     return true;
   }
 
+  unsigned NumReadPtrs = 0;
+  unsigned NumWritePtrs = 0;
+
   // Find pointers with computable bounds. We are going to use this information
   // to place a runtime bound check.
   bool CanDoRT = true;
@@ -2687,6 +2690,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
     Value *V = (*MI).first;
     if (hasComputableBounds(V)) {
       PtrRtCheck.insert(SE, TheLoop, V, true);
+      NumWritePtrs++;
       DEBUG(dbgs() << "LV: Found a runtime check ptr:" << *V <<"\n");
     } else {
       CanDoRT = false;
@@ -2697,6 +2701,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
     Value *V = (*MI).first;
     if (hasComputableBounds(V)) {
       PtrRtCheck.insert(SE, TheLoop, V, false);
+      NumReadPtrs++;
       DEBUG(dbgs() << "LV: Found a runtime check ptr:" << *V <<"\n");
     } else {
       CanDoRT = false;
@@ -2706,7 +2711,9 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
 
   // Check that we did not collect too many pointers or found a
   // unsizeable pointer.
-  if (!CanDoRT || PtrRtCheck.Pointers.size() > RuntimeMemoryCheckThreshold) {
+  unsigned NumComparisons = (NumWritePtrs * (NumReadPtrs + NumWritePtrs - 1));
+  DEBUG(dbgs() << "LV: We need to compare " << NumComparisons << " ptrs.\n");
+  if (!CanDoRT || NumComparisons > RuntimeMemoryCheckThreshold) {
     PtrRtCheck.reset();
     CanDoRT = false;
   }
diff --git a/test/Transforms/LoopVectorize/runtime-limit.ll b/test/Transforms/LoopVectorize/runtime-limit.ll
new file mode 100644
index 00000000000..d7839746f0e
--- /dev/null
+++ b/test/Transforms/LoopVectorize/runtime-limit.ll
@@ -0,0 +1,84 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; We are vectorizing with 6 runtime checks.
+;CHECK: func1x6 +;CHECK: <4 x i32> +;CHECK: ret +define i32 @func1x6(i32* nocapture %out, i32* nocapture %A, i32* nocapture %B, i32* nocapture %C, i32* nocapture %D, i32* nocapture %E, i32* nocapture %F) { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.016 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %arrayidx = getelementptr inbounds i32* %A, i64 %i.016 + %0 = load i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %B, i64 %i.016 + %1 = load i32* %arrayidx1, align 4 + %add = add nsw i32 %1, %0 + %arrayidx2 = getelementptr inbounds i32* %C, i64 %i.016 + %2 = load i32* %arrayidx2, align 4 + %add3 = add nsw i32 %add, %2 + %arrayidx4 = getelementptr inbounds i32* %E, i64 %i.016 + %3 = load i32* %arrayidx4, align 4 + %add5 = add nsw i32 %add3, %3 + %arrayidx6 = getelementptr inbounds i32* %F, i64 %i.016 + %4 = load i32* %arrayidx6, align 4 + %add7 = add nsw i32 %add5, %4 + %arrayidx8 = getelementptr inbounds i32* %out, i64 %i.016 + store i32 %add7, i32* %arrayidx8, align 4 + %inc = add i64 %i.016, 1 + %exitcond = icmp eq i64 %inc, 256 + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body + ret i32 undef +} + +; We are not vectorizing with 12 runtime checks. +;CHECK: func2x6 +;CHECK-NOT: <4 x i32> +;CHECK: ret +define i32 @func2x6(i32* nocapture %out, i32* nocapture %out2, i32* nocapture %A, i32* nocapture %B, i32* nocapture %C, i32* nocapture %D, i32* nocapture %E, i32* nocapture %F) { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.037 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %arrayidx = getelementptr inbounds i32* %A, i64 %i.037 + %0 = load i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %B, i64 %i.037 + %1 = load i32* %arrayidx1, align 4 + %add = add nsw i32 %1, %0 + %arrayidx2 = getelementptr inbounds i32* %C, i64 %i.037 + %2 = load i32* %arrayidx2, align 4 + %add3 = add nsw i32 %add, %2 + %arrayidx4 = getelementptr inbounds i32* %E, i64 %i.037 + %3 = load i32* %arrayidx4, align 4 + %add5 = add nsw i32 %add3, %3 + %arrayidx6 = getelementptr inbounds i32* %F, i64 %i.037 + %4 = load i32* %arrayidx6, align 4 + %add7 = add nsw i32 %add5, %4 + %arrayidx8 = getelementptr inbounds i32* %out, i64 %i.037 + store i32 %add7, i32* %arrayidx8, align 4 + %5 = load i32* %arrayidx, align 4 + %6 = load i32* %arrayidx1, align 4 + %add11 = add nsw i32 %6, %5 + %7 = load i32* %arrayidx2, align 4 + %add13 = add nsw i32 %add11, %7 + %8 = load i32* %arrayidx4, align 4 + %add15 = add nsw i32 %add13, %8 + %9 = load i32* %arrayidx6, align 4 + %add17 = add nsw i32 %add15, %9 + %arrayidx18 = getelementptr inbounds i32* %out2, i64 %i.037 + store i32 %add17, i32* %arrayidx18, align 4 + %inc = add i64 %i.037, 1 + %exitcond = icmp eq i64 %inc, 256 + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body + ret i32 undef +} +
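
For reference, a minimal standalone C++ sketch (not part of the patch) of the heuristic this change introduces: instead of bounding the number of runtime-checked pointers, the vectorizer now bounds the number of pairwise comparisons those checks imply, computed as NumWritePtrs * (NumReadPtrs + NumWritePtrs - 1), i.e. every written pointer is compared against every other pointer, while read-only pointers are never compared against each other. The helper name numRuntimeComparisons and the main() driver below are illustrative only; the pointer counts mirror the shapes of the two test functions above.

// Illustrative sketch only -- not LLVM code. Mirrors the expression added to
// LoopVectorizationLegality::canVectorizeMemory() in this patch.
#include <iostream>

// Hypothetical helper: number of pointer-pair comparisons the runtime memory
// check needs for a loop with the given numbers of written and read pointers.
static unsigned numRuntimeComparisons(unsigned NumWritePtrs,
                                      unsigned NumReadPtrs) {
  // Each written pointer is compared against every other pointer (all reads
  // plus the remaining writes); reads need not be compared with each other.
  return NumWritePtrs * (NumReadPtrs + NumWritePtrs - 1);
}

int main() {
  const unsigned RuntimeMemoryCheckThreshold = 8; // value set by this patch

  // func1x6: one store (%out) and five loads (%A, %B, %C, %E, %F).
  unsigned C1 = numRuntimeComparisons(1, 5); // 1 * (5 + 1 - 1) = 5
  // func2x6: two stores (%out, %out2) and the same five loads.
  unsigned C2 = numRuntimeComparisons(2, 5); // 2 * (5 + 2 - 1) = 12

  std::cout << "func1x6: " << C1 << " comparisons -> "
            << (C1 <= RuntimeMemoryCheckThreshold ? "vectorize\n" : "give up\n");
  std::cout << "func2x6: " << C2 << " comparisons -> "
            << (C2 <= RuntimeMemoryCheckThreshold ? "vectorize\n" : "give up\n");
  return 0;
}

Under the formula, func2x6 needs 12 comparisons, which exceeds the new threshold of 8, so the second test expects no <4 x i32> instructions; func1x6 stays within the limit and is expected to be vectorized.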