Don't use a runtime bounds check between address spaces.

Don't vectorize with a runtime check if it requires a
comparison between pointers with different address spaces.
The values can't be assumed to be directly comparable.
Previously this would create an illegal bitcast.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@191862 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Matt Arsenault
Date:   2013-10-02 22:38:17 +00:00
parent 62faf2f9d0
commit 407847f130
3 changed files with 426 additions and 11 deletions
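
For context: the runtime bounds check casts both pointers to a common i8* type before comparing them, and a bitcast is not allowed to change a pointer's address space, so emitting the check for a mixed-address-space pair produced IR the verifier rejects. A minimal sketch of the rejected cast, with hypothetical value names:

    ; Invalid IR: bitcast cannot convert a pointer between address spaces.
    %bc = bitcast i8 addrspace(1)* %start1 to i8*

With this change the vectorizer instead gives up on the runtime check (and therefore on vectorization) when the pointers being compared live in different address spaces.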

lib/Transforms/Vectorize/LoopVectorize.cpp

@@ -1385,11 +1385,9 @@ InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal,
   SmallVector<TrackingVH<Value> , 2> Starts;
   SmallVector<TrackingVH<Value> , 2> Ends;
+  LLVMContext &Ctx = Loc->getContext();
   SCEVExpander Exp(*SE, "induction");
-  // Use this type for pointer arithmetic.
-  Type* PtrArithTy = Type::getInt8PtrTy(Loc->getContext(), 0);
   for (unsigned i = 0; i < NumPointers; ++i) {
     Value *Ptr = PtrRtCheck->Pointers[i];
     const SCEV *Sc = SE->getSCEV(Ptr);
@@ -1401,6 +1399,10 @@ InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal,
       Ends.push_back(Ptr);
     } else {
       DEBUG(dbgs() << "LV: Adding RT check for range:" << *Ptr << '\n');
+      unsigned AS = Ptr->getType()->getPointerAddressSpace();
+      // Use this type for pointer arithmetic.
+      Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);
       Value *Start = Exp.expandCodeFor(PtrRtCheck->Starts[i], PtrArithTy, Loc);
       Value *End = Exp.expandCodeFor(PtrRtCheck->Ends[i], PtrArithTy, Loc);
@@ -1422,10 +1424,20 @@ InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal,
       if (PtrRtCheck->DependencySetId[i] == PtrRtCheck->DependencySetId[j])
         continue;
-      Value *Start0 = ChkBuilder.CreateBitCast(Starts[i], PtrArithTy, "bc");
-      Value *Start1 = ChkBuilder.CreateBitCast(Starts[j], PtrArithTy, "bc");
-      Value *End0 = ChkBuilder.CreateBitCast(Ends[i], PtrArithTy, "bc");
-      Value *End1 = ChkBuilder.CreateBitCast(Ends[j], PtrArithTy, "bc");
+      unsigned AS0 = Starts[i]->getType()->getPointerAddressSpace();
+      unsigned AS1 = Starts[j]->getType()->getPointerAddressSpace();
+      assert((AS0 == Ends[j]->getType()->getPointerAddressSpace()) &&
+             (AS1 == Ends[i]->getType()->getPointerAddressSpace()) &&
+             "Trying to bounds check pointers with different address spaces");
+      Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
+      Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);
+      Value *Start0 = ChkBuilder.CreateBitCast(Starts[i], PtrArithTy0, "bc");
+      Value *Start1 = ChkBuilder.CreateBitCast(Starts[j], PtrArithTy1, "bc");
+      Value *End0 = ChkBuilder.CreateBitCast(Ends[i], PtrArithTy1, "bc");
+      Value *End1 = ChkBuilder.CreateBitCast(Ends[j], PtrArithTy0, "bc");
       Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
       Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
@@ -1440,9 +1452,8 @@ InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal,
   // We have to do this trickery because the IRBuilder might fold the check to a
   // constant expression in which case there is no Instruction anchored in a
   // the block.
-  LLVMContext &Ctx = Loc->getContext();
-  Instruction * Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
-                                                  ConstantInt::getTrue(Ctx));
+  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
+                                                 ConstantInt::getTrue(Ctx));
   ChkBuilder.Insert(Check, "memcheck.conflict");
   return Check;
 }
@@ -3166,9 +3177,36 @@ bool AccessAnalysis::canCheckPtrAtRT(
   if (IsDepCheckNeeded && CanDoRT && RunningDepId == 2)
     NumComparisons = 0; // Only one dependence set.
-  else
+  else {
     NumComparisons = (NumWritePtrChecks * (NumReadPtrChecks +
                                            NumWritePtrChecks - 1));
+  }
+  // If the pointers that we would use for the bounds comparison have different
+  // address spaces, assume the values aren't directly comparable, so we can't
+  // use them for the runtime check. We also have to assume they could
+  // overlap. In the future there should be metadata for whether address spaces
+  // are disjoint.
+  unsigned NumPointers = RtCheck.Pointers.size();
+  for (unsigned i = 0; i < NumPointers; ++i) {
+    for (unsigned j = i + 1; j < NumPointers; ++j) {
+      // Only need to check pointers between two different dependency sets.
+      if (RtCheck.DependencySetId[i] == RtCheck.DependencySetId[j])
+        continue;
+      Value *PtrI = RtCheck.Pointers[i];
+      Value *PtrJ = RtCheck.Pointers[j];
+      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
+      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
+      if (ASi != ASj) {
+        DEBUG(dbgs() << "LV: Runtime check would require comparison between"
+              " different address spaces\n");
+        return false;
+      }
+    }
+  }
   return CanDoRT;
 }
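
For reference, the bound0/bound1 values built by ChkBuilder above implement the usual interval-overlap test: two pointer ranges can conflict only if each range starts no later than the other one ends. A rough sketch of the emitted comparison for one pair, with hypothetical value names and both pointers assumed to be in the same address space:

    %bound0 = icmp ule i8* %start0, %end1    ; Start0 <= End1
    %bound1 = icmp ule i8* %start1, %end0    ; Start1 <= End0
    %found.conflict = and i1 %bound0, %bound1

The per-pair conflict bits are ORed together, and the final value is ANDed with true (as shown in the hunk above) so that there is always an Instruction to anchor memcheck.conflict on.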


@@ -0,0 +1,235 @@
; RUN: opt -S -march=r600 -mcpu=cayman -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine < %s | FileCheck %s
; Check vectorization that would ordinarily require a runtime bounds
; check on the pointers when mixing address spaces. For now we cannot
; assume address spaces do not alias, and we can't assume that
; different pointers are directly comparable.
;
; These all test this basic loop for different combinations of address
; spaces, and swapping in globals or adding noalias.
;
;void foo(int addrspace(N)* [noalias] a, int addrspace(M)* [noalias] b, int n)
;{
; for (int i = 0; i < n; ++i)
; {
; a[i] = 3 * b[i];
; }
;}
; Artificial datalayout
target datalayout = "e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
@g_as1 = common addrspace(1) global [1024 x i32] zeroinitializer, align 16
@q_as2 = common addrspace(2) global [1024 x i32] zeroinitializer, align 16
; Both parameters are unidentified objects with the same address
; space, so this should vectorize normally.
define void @foo(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 %n) #0 {
; CHECK-LABEL: @foo(
; CHECK: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp slt i32 %i.0, %n
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
%arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %idxprom
%0 = load i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.0 to i64
%arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %idxprom1
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
%inc = add nsw i32 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
; Parameters are unidentified and different address spaces, so cannot vectorize.
define void @bar0(i32* %a, i32 addrspace(1)* %b, i32 %n) #0 {
; CHECK-LABEL: @bar0(
; CHECK-NOT: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp slt i32 %i.0, %n
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
%arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %idxprom
%0 = load i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.0 to i64
%arrayidx2 = getelementptr inbounds i32* %a, i64 %idxprom1
store i32 %mul, i32* %arrayidx2, align 4
%inc = add nsw i32 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
; Swapped arguments should be the same
define void @bar1(i32 addrspace(1)* %a, i32* %b, i32 %n) #0 {
; CHECK-LABEL: @bar1(
; CHECK-NOT: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp slt i32 %i.0, %n
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
%arrayidx = getelementptr inbounds i32* %b, i64 %idxprom
%0 = load i32* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.0 to i64
%arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %idxprom1
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
%inc = add nsw i32 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
; We should still be able to vectorize with noalias even if the
; address spaces are different.
define void @bar2(i32* noalias %a, i32 addrspace(1)* noalias %b, i32 %n) #0 {
; CHECK-LABEL: @bar2(
; CHECK: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp slt i32 %i.0, %n
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
%arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %idxprom
%0 = load i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.0 to i64
%arrayidx2 = getelementptr inbounds i32* %a, i64 %idxprom1
store i32 %mul, i32* %arrayidx2, align 4
%inc = add nsw i32 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
; Store to identified global with different address space. This isn't
; generally safe and shouldn't be vectorized.
define void @arst0(i32* %b, i32 %n) #0 {
; CHECK-LABEL: @arst0(
; CHECK-NOT: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp slt i32 %i.0, %n
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
%arrayidx = getelementptr inbounds i32* %b, i64 %idxprom
%0 = load i32* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.0 to i64
%arrayidx2 = getelementptr inbounds [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom1
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
%inc = add nsw i32 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
; Load from identified global with different address space.
; This isn't generally safe and shouldn't be vectorized.
define void @arst1(i32* %b, i32 %n) #0 {
; CHECK-LABEL: @arst1(
; CHECK-NOT: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp slt i32 %i.0, %n
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
%arrayidx = getelementptr inbounds [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom
%0 = load i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.0 to i64
%arrayidx2 = getelementptr inbounds i32* %b, i64 %idxprom1
store i32 %mul, i32* %arrayidx2, align 4
%inc = add nsw i32 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
; Read and write to 2 identified globals in different address
; spaces. This should be vectorized.
define void @aoeu(i32 %n) #0 {
; CHECK-LABEL: @aoeu(
; CHECK: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp slt i32 %i.0, %n
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
%arrayidx = getelementptr inbounds [1024 x i32] addrspace(2)* @q_as2, i64 0, i64 %idxprom
%0 = load i32 addrspace(2)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.0 to i64
%arrayidx2 = getelementptr inbounds [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom1
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
%inc = add nsw i32 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
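
In the functions above that are expected to vectorize (@foo, @bar2, @aoeu), the "<4 x i32>" CHECK lines match a widened loop body roughly of this shape (illustrative only; value names and address spaces vary per test):

    %wide.load = load <4 x i32> addrspace(1)* %vec.ptr, align 4
    %mul = mul nsw <4 x i32> %wide.load, <i32 3, i32 3, i32 3, i32 3>
    store <4 x i32> %mul, <4 x i32> addrspace(1)* %vec.out, align 4

The CHECK-NOT lines in the remaining functions assert that no such widened operations are emitted.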


@@ -0,0 +1,142 @@
; RUN: opt -S -march=r600 -mcpu=cayman -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine < %s | FileCheck %s
; Artificial datalayout
target datalayout = "e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
define void @add_ints_1_1_1(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c) #0 {
; CHECK-LABEL: @add_ints_1_1_1(
; CHECK: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp ult i64 %i.0, 200
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.0
%0 = load i32 addrspace(1)* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32 addrspace(1)* %c, i64 %i.0
%1 = load i32 addrspace(1)* %arrayidx1, align 4
%add = add nsw i32 %0, %1
%arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %i.0
store i32 %add, i32 addrspace(1)* %arrayidx2, align 4
%inc = add i64 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
define void @add_ints_as_1_0_0(i32 addrspace(1)* %a, i32* %b, i32* %c) #0 {
; CHECK-LABEL: @add_ints_as_1_0_0(
; CHECK-NOT: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp ult i64 %i.0, 200
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%arrayidx = getelementptr inbounds i32* %b, i64 %i.0
%0 = load i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32* %c, i64 %i.0
%1 = load i32* %arrayidx1, align 4
%add = add nsw i32 %0, %1
%arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %i.0
store i32 %add, i32 addrspace(1)* %arrayidx2, align 4
%inc = add i64 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
define void @add_ints_as_0_1_0(i32* %a, i32 addrspace(1)* %b, i32* %c) #0 {
; CHECK-LABEL: @add_ints_as_0_1_0(
; CHECK-NOT: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp ult i64 %i.0, 200
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.0
%0 = load i32 addrspace(1)* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32* %c, i64 %i.0
%1 = load i32* %arrayidx1, align 4
%add = add nsw i32 %0, %1
%arrayidx2 = getelementptr inbounds i32* %a, i64 %i.0
store i32 %add, i32* %arrayidx2, align 4
%inc = add i64 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
define void @add_ints_as_0_1_1(i32* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c) #0 {
; CHECK-LABEL: @add_ints_as_0_1_1(
; CHECK-NOT: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp ult i64 %i.0, 200
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.0
%0 = load i32 addrspace(1)* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32 addrspace(1)* %c, i64 %i.0
%1 = load i32 addrspace(1)* %arrayidx1, align 4
%add = add nsw i32 %0, %1
%arrayidx2 = getelementptr inbounds i32* %a, i64 %i.0
store i32 %add, i32* %arrayidx2, align 4
%inc = add i64 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
define void @add_ints_as_0_1_2(i32* %a, i32 addrspace(1)* %b, i32 addrspace(2)* %c) #0 {
; CHECK-LABEL: @add_ints_as_0_1_2(
; CHECK-NOT: <4 x i32>
; CHECK: ret
entry:
br label %for.cond
for.cond: ; preds = %for.body, %entry
%i.0 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp ult i64 %i.0, 200
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.0
%0 = load i32 addrspace(1)* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32 addrspace(2)* %c, i64 %i.0
%1 = load i32 addrspace(2)* %arrayidx1, align 4
%add = add nsw i32 %0, %1
%arrayidx2 = getelementptr inbounds i32* %a, i64 %i.0
store i32 %add, i32* %arrayidx2, align 4
%inc = add i64 %i.0, 1
br label %for.cond
for.end: ; preds = %for.cond
ret void
}
attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }