diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 25f7a78f667..c1accd32c5c 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1664,23 +1664,7 @@ bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
     }
   }
 
-  if (Changed || ChainLen > VF) return Changed;
-
-  // Handle short chains. This helps us catch types such as <3 x float> that
-  // are smaller than vector size.
-  R.buildTree(Chain);
-
-  int Cost = R.getTreeCost();
-
-  if (Cost < CostThreshold) {
-    DEBUG(dbgs() << "SLP: Found store chain cost = " << Cost
-          << " for size = " << ChainLen << "\n");
-    R.vectorizeTree();
-    return true;
-  }
-
-  return false;
-
+  return Changed;
 }
 
 bool SLPVectorizer::vectorizeStores(ArrayRef<Value *> Stores,
diff --git a/test/Transforms/SLPVectorizer/X86/cycle_dup.ll b/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
index 501fae17855..fba35499fb7 100644
--- a/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
+++ b/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
@@ -3,55 +3,61 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.9.0"
-
 ; int foo(int *A) {
-; int r = A[0], g = A[1], b = A[2];
-; for (int i=0; i < A[13]; i++)
-;   r*=18; g*=19; b*=12;
-; A[0] = r; A[1] = g; A[2] = b;
+; int r = A[0], g = A[1], b = A[2], a = A[3];
+; for (int i=0; i < A[13]; i++) {
+;   r*=18; g*=19; b*=12; a *=9;
+; }
+; A[0] = r; A[1] = g; A[2] = b; A[3] = a;
 ; }
 
 ;CHECK-LABEL: @foo
-;CHECK: bitcast i32* %A to <3 x i32>*
-;CHECK-NEXT: load <3 x i32>
-;CHECK: phi <3 x i32>
-;CHECK-NEXT: mul <3 x i32>
+;CHECK: bitcast i32* %A to <4 x i32>*
+;CHECK-NEXT: load <4 x i32>
+;CHECK: phi <4 x i32>
+;CHECK-NEXT: mul <4 x i32>
 ;CHECK-NOT: mul
-;CHECK: phi <3 x i32>
-;CHECK: bitcast i32* %A to <3 x i32>*
-;CHECK-NEXT: store <3 x i32>
+;CHECK: phi <4 x i32>
+;CHECK: bitcast i32* %A to <4 x i32>*
+;CHECK-NEXT: store <4 x i32>
 ;CHECK-NEXT:ret i32 undef
 
-define i32 @foo(i32* nocapture %A) {
+define i32 @foo(i32* nocapture %A) #0 {
 entry:
   %0 = load i32* %A, align 4
   %arrayidx1 = getelementptr inbounds i32* %A, i64 1
   %1 = load i32* %arrayidx1, align 4
   %arrayidx2 = getelementptr inbounds i32* %A, i64 2
   %2 = load i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32* %A, i64 13
+  %arrayidx3 = getelementptr inbounds i32* %A, i64 3
   %3 = load i32* %arrayidx3, align 4
-  %cmp18 = icmp sgt i32 %3, 0
-  br i1 %cmp18, label %for.body, label %for.end
+  %arrayidx4 = getelementptr inbounds i32* %A, i64 13
+  %4 = load i32* %arrayidx4, align 4
+  %cmp24 = icmp sgt i32 %4, 0
+  br i1 %cmp24, label %for.body, label %for.end
 
 for.body:                                         ; preds = %entry, %for.body
-  %i.022 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-  %b.021 = phi i32 [ %mul5, %for.body ], [ %2, %entry ]
-  %g.020 = phi i32 [ %mul4, %for.body ], [ %1, %entry ]
-  %r.019 = phi i32 [ %mul, %for.body ], [ %0, %entry ]
-  %mul = mul nsw i32 %r.019, 18
-  %mul4 = mul nsw i32 %g.020, 19
-  %mul5 = mul nsw i32 %b.021, 12
-  %inc = add nsw i32 %i.022, 1
-  %cmp = icmp slt i32 %inc, %3
+  %i.029 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %a.028 = phi i32 [ %mul7, %for.body ], [ %3, %entry ]
+  %b.027 = phi i32 [ %mul6, %for.body ], [ %2, %entry ]
+  %g.026 = phi i32 [ %mul5, %for.body ], [ %1, %entry ]
+  %r.025 = phi i32 [ %mul, %for.body ], [ %0, %entry ]
+  %mul = mul nsw i32 %r.025, 18
+  %mul5 = mul nsw i32 %g.026, 19
+  %mul6 = mul nsw i32 %b.027, 12
+  %mul7 = mul nsw i32 %a.028, 9
+  %inc = add nsw i32 %i.029, 1
+  %cmp = icmp slt i32 %inc, %4
   br i1 %cmp, label %for.body, label %for.end
 
 for.end:                                          ; preds = %for.body, %entry
-  %b.0.lcssa = phi i32 [ %2, %entry ], [ %mul5, %for.body ]
-  %g.0.lcssa = phi i32 [ %1, %entry ], [ %mul4, %for.body ]
+  %a.0.lcssa = phi i32 [ %3, %entry ], [ %mul7, %for.body ]
+  %b.0.lcssa = phi i32 [ %2, %entry ], [ %mul6, %for.body ]
+  %g.0.lcssa = phi i32 [ %1, %entry ], [ %mul5, %for.body ]
   %r.0.lcssa = phi i32 [ %0, %entry ], [ %mul, %for.body ]
   store i32 %r.0.lcssa, i32* %A, align 4
   store i32 %g.0.lcssa, i32* %arrayidx1, align 4
   store i32 %b.0.lcssa, i32* %arrayidx2, align 4
+  store i32 %a.0.lcssa, i32* %arrayidx3, align 4
   ret i32 undef
 }
diff --git a/test/Transforms/SLPVectorizer/X86/odd_store.ll b/test/Transforms/SLPVectorizer/X86/odd_store.ll
index de6503b477f..027f6016e2b 100644
--- a/test/Transforms/SLPVectorizer/X86/odd_store.ll
+++ b/test/Transforms/SLPVectorizer/X86/odd_store.ll
@@ -10,12 +10,12 @@ target triple = "x86_64-apple-macosx10.8.0"
 ;}
 
 ;CHECK-LABEL: @foo(
-;CHECK: load <3 x float>
-;CHECK: fmul <3 x float>
-;CHECK: fpext <3 x float>
-;CHECK: fadd <3 x double>
-;CHECK: fptosi <3 x double>
-;CHECK: store <3 x i8>
+;CHECK-NOT: load <3 x float>
+;CHECK-NOT: fmul <3 x float>
+;CHECK-NOT: fpext <3 x float>
+;CHECK-NOT: fadd <3 x double>
+;CHECK-NOT: fptosi <3 x double>
+;CHECK-NOT: store <3 x i8>
 ;CHECK: ret
 define i32 @foo(i8* noalias nocapture %A, float* noalias nocapture %B, float %T) {
   %1 = getelementptr inbounds float* %B, i64 10