Boost the effective chain depth of loads and stores.

By default, boost the chain-depth contribution of loads and stores. This allows a load/store pair to vectorize even when the chain containing it would not otherwise be deep enough to satisfy the required chain depth.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@149761 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Hal Finkel
Date:   2012-02-04 04:14:04 +00:00
Commit: edc8db87dc (parent fd4b8e2bac)
2 changed files with 32 additions and 0 deletions


@@ -103,6 +103,11 @@ static cl::opt<bool>
 AlignedOnly("bb-vectorize-aligned-only", cl::init(false), cl::Hidden,
   cl::desc("Only generate aligned loads and stores"));
 
+static cl::opt<bool>
+NoMemOpBoost("bb-vectorize-no-mem-op-boost",
+  cl::init(false), cl::Hidden,
+  cl::desc("Don't boost the chain-depth contribution of loads and stores"));
+
 static cl::opt<bool>
 FastDep("bb-vectorize-fast-dep", cl::init(false), cl::Hidden,
   cl::desc("Use a fast instruction dependency analysis"));
@@ -340,6 +345,11 @@ namespace {
       if (isa<InsertElementInst>(V) || isa<ExtractElementInst>(V))
         return 0;
 
+      // Give a load or store half of the required depth so that load/store
+      // pairs will vectorize.
+      if (!NoMemOpBoost && (isa<LoadInst>(V) || isa<StoreInst>(V)))
+        return ReqChainDepth/2;
+
       return 1;
     }
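
To make the arithmetic concrete: with the pass's default required chain depth of 6, a load and a store each contribute 6/2 = 3, so a matched load/store pair reaches depth 6 on its own. Below is a minimal standalone sketch of that accounting; the names mirror BBVectorize's, but this is an illustration detached from the pass, not its implementation:

#include <cstddef>

namespace {
  // Defaults mirroring -bb-vectorize-req-chain-depth and
  // -bb-vectorize-no-mem-op-boost (sketch only).
  const std::size_t ReqChainDepth = 6;
  bool NoMemOpBoost = false;

  std::size_t depthFactor(bool IsLoadOrStore) {
    if (!NoMemOpBoost && IsLoadOrStore)
      return ReqChainDepth / 2; // a load or store counts as 3
    return 1;                   // any other instruction counts as 1
  }
}

int main() {
  // A load feeding a store: 3 + 3 = 6 meets the depth-6 requirement,
  // so the pair can vectorize without a surrounding arithmetic chain.
  return (depthFactor(true) + depthFactor(true) >= ReqChainDepth) ? 0 : 1;
}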


@@ -0,0 +1,22 @@
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=6 -instcombine -gvn -S | FileCheck %s
+
+@A = common global [1024 x float] zeroinitializer, align 16
+@B = common global [1024 x float] zeroinitializer, align 16
+
+define i32 @test1() nounwind {
+; CHECK: @test1
+%V1 = load float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 0), align 16
+%V2 = load float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 1), align 4
+%V3 = load float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 2), align 8
+%V4 = load float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 3), align 4
+; CHECK: %V1 = load <4 x float>* bitcast ([1024 x float]* @A to <4 x float>*), align 16
+store float %V1, float* getelementptr inbounds ([1024 x float]* @B, i64 0, i64 0), align 16
+store float %V2, float* getelementptr inbounds ([1024 x float]* @B, i64 0, i64 1), align 4
+store float %V3, float* getelementptr inbounds ([1024 x float]* @B, i64 0, i64 2), align 8
+store float %V4, float* getelementptr inbounds ([1024 x float]* @B, i64 0, i64 3), align 4
+; CHECK-NEXT: store <4 x float> %V1, <4 x float>* bitcast ([1024 x float]* @B to <4 x float>*), align 16
+ret i32 0
+; CHECK-NEXT: ret i32 0
+}
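
Read as C++, the rewrite the CHECK lines expect is roughly the following (an illustrative analogy only; the pass operates on IR, and test1_scalar/test1_vector are hypothetical names, not part of the commit):

#include <cstring>

float A[1024], B[1024];

// Before: four scalar float copies. Without the boost, no single
// load/store pair carries enough chain depth to be vectorized.
void test1_scalar() {
  B[0] = A[0];
  B[1] = A[1];
  B[2] = A[2];
  B[3] = A[3];
}

// After: the boosted pairs combine into one <4 x float> load and one
// <4 x float> store, i.e. a single 16-byte aligned block copy.
void test1_vector() {
  std::memcpy(B, A, 4 * sizeof(float)); // stands in for the vector load/store
}

int main() {
  test1_scalar();
  test1_vector();
  return 0;
}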