Fix PR22179.

We were incorrectly inferring nsw for certain SCEVs. We can be more
aggressive here (see Richard Smith's comment on
http://llvm.org/bugs/show_bug.cgi?id=22179) but this change just
focuses on correctness.
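
A note on why the removed "vice-versa" direction was unsound (an illustration,
not part of the patch): an add that is nuw with non-negative operands can
still wrap in the signed sense. A minimal standalone C++ sketch, with
uint8_t/int8_t standing in for LLVM's i8:

  // Hypothetical demo, not LLVM code: 127 + 1 stays in unsigned i8 range
  // (nuw holds, and both operands are non-negative as signed values), yet
  // the signed result wraps from 127 to -128, so nsw must not be inferred.
  #include <cstdint>
  #include <cstdio>

  int main() {
    uint8_t A = 127, B = 1;                 // both non-negative as signed i8
    uint8_t UnsignedSum = A + B;            // 128 <= 255: no unsigned wrap
    int8_t SignedSum = (int8_t)UnsignedSum; // -128 on two's complement:
                                            // the signed add wrapped
    std::printf("unsigned: %u, signed: %d\n", (unsigned)UnsignedSum,
                (int)SignedSum);
    return 0;
  }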

Differential Revision: http://reviews.llvm.org/D6914



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@225591 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Sanjoy Das
Date:   2015-01-10 23:41:24 +00:00
Commit: 7f0da20b97 (parent 3073d3960a)
4 changed files with 63 additions and 44 deletions


@@ -1737,6 +1737,36 @@ namespace {
 };
 }
 
+// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
+// `OldFlags' as can't-wrap behavior.  Infer a more aggressive set of
+// can't-overflow flags for the operation if possible.
+static SCEV::NoWrapFlags
+StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
+                      const SmallVectorImpl<const SCEV *> &Ops,
+                      SCEV::NoWrapFlags OldFlags) {
+  using namespace std::placeholders;
+
+  bool CanAnalyze =
+      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
+  (void)CanAnalyze;
+  assert(CanAnalyze && "don't call from other places!");
+
+  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
+  SCEV::NoWrapFlags SignOrUnsignWrap =
+      ScalarEvolution::maskFlags(OldFlags, SignOrUnsignMask);
+
+  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
+  auto IsKnownNonNegative =
+      std::bind(std::mem_fn(&ScalarEvolution::isKnownNonNegative), SE, _1);
+
+  if (SignOrUnsignWrap == SCEV::FlagNSW &&
+      std::all_of(Ops.begin(), Ops.end(), IsKnownNonNegative))
+    return ScalarEvolution::setFlags(OldFlags,
+                                     (SCEV::NoWrapFlags)SignOrUnsignMask);
+
+  return OldFlags;
+}
+
 /// getAddExpr - Get a canonical add expression, or something simpler if
 /// possible.
 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
@@ -1752,20 +1782,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
            "SCEVAddExpr operand types don't match!");
 #endif
 
-  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
-  // And vice-versa.
-  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
-  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
-  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
-    bool All = true;
-    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
-         E = Ops.end(); I != E; ++I)
-      if (!isKnownNonNegative(*I)) {
-        All = false;
-        break;
-      }
-    if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
-  }
+  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
 
   // Sort by complexity, this groups all similar expression types together.
   GroupByComplexity(Ops, LI);
@@ -2174,20 +2191,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
            "SCEVMulExpr operand types don't match!");
 #endif
 
-  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
-  // And vice-versa.
-  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
-  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
-  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
-    bool All = true;
-    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
-         E = Ops.end(); I != E; ++I)
-      if (!isKnownNonNegative(*I)) {
-        All = false;
-        break;
-      }
-    if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
-  }
+  Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
 
   // Sort by complexity, this groups all similar expression types together.
   GroupByComplexity(Ops, LI);
@@ -2653,20 +2657,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
   // meaningful BE count at this point (and if we don't, we'd be stuck
   // with a SCEVCouldNotCompute as the cached BE count).
 
-  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
-  // And vice-versa.
-  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
-  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
-  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
-    bool All = true;
-    for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
-         E = Operands.end(); I != E; ++I)
-      if (!isKnownNonNegative(*I)) {
-        All = false;
-        break;
-      }
-    if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
-  }
+  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
 
   // Canonicalize nested AddRecs in by nesting them in order of loop depth.
   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
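
The direction StrengthenNoWrapFlags keeps is the sound one: if an add is nsw
and every operand is known non-negative, the operands and the sum all stay in
the non-negative half of the signed range, so the unsigned add cannot wrap
either. A brute-force C++ check over all i8 operand pairs (my sketch, not
part of the commit):

  // Exhaustively verify: nsw + non-negative operands implies nuw for i8 adds.
  #include <cassert>
  #include <cstdio>

  int main() {
    for (int A = 0; A <= 127; ++A)   // all non-negative signed i8 values
      for (int B = 0; B <= 127; ++B) {
        bool NSW = A + B <= 127;     // signed i8 add does not overflow
        bool NUW = A + B <= 255;     // unsigned i8 add does not overflow
        if (NSW)
          assert(NUW && "nsw + non-negative operands must imply nuw");
      }
    std::puts("verified for all i8 operand pairs");
    return 0;
  }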


@@ -0,0 +1,28 @@
+; RUN: opt -analyze -scalar-evolution < %s | FileCheck %s
+
+%struct.anon = type { i8 }
+%struct.S = type { i32 }
+
+@a = common global %struct.anon zeroinitializer, align 1
+@b = common global %struct.S zeroinitializer, align 4
+
+; Function Attrs: nounwind ssp uwtable
+define i32 @main() {
+; CHECK-LABEL: Classifying expressions for: @main
+  store i8 0, i8* getelementptr inbounds (%struct.anon* @a, i64 0, i32 0), align 1
+  br label %loop
+
+loop:
+  %storemerge1 = phi i8 [ 0, %0 ], [ %inc, %loop ]
+  %m = load volatile i32* getelementptr inbounds (%struct.S* @b, i64 0, i32 0), align 4
+  %inc = add nuw i8 %storemerge1, 1
+; CHECK: %inc = add nuw i8 %storemerge1, 1
+; CHECK-NEXT: --> {1,+,1}<nuw><%loop>
+; CHECK-NOT: --> {1,+,1}<nuw><nsw><%loop>
+  %exitcond = icmp eq i8 %inc, -128
+  br i1 %exitcond, label %exit, label %loop
+
+exit:
+  store i8 -128, i8* getelementptr inbounds (%struct.anon* @a, i64 0, i32 0), align 1
+  ret i32 0
+}
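
Worked through by hand (my reading of the test, not stated in the commit):
{1,+,1}<%loop> takes the i8 values 1, 2, ..., 127, and finally 128 on the
exiting iteration. 128 fits unsigned, so <nuw> is correct, but reinterpreted
as a signed i8 it is -128, so the last increment 127 + 1 wraps signed; <nsw>
is exactly the flag the old vice-versa inference would have wrongly added.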


@@ -20,7 +20,7 @@ for.body: ; preds = %for.body, %entry
   %arrayidx = getelementptr inbounds double* %b, i64 %tmp
   %tmp1 = load double* %arrayidx, align 8
 ; The induction variable should carry the scaling factor: 1 * 8 = 8.
-; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 8
+; CHECK: [[IVNEXT]] = add nuw i64 [[IV]], 8
   %indvars.iv.next = add i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds double* %c, i64 %indvars.iv.next
   %tmp2 = load double* %arrayidx2, align 8


@@ -22,7 +22,7 @@ for.body: ; preds = %for.body, %entry
   %arrayidx = getelementptr inbounds double* %b, i64 %tmp
   %tmp1 = load double* %arrayidx, align 8
 ; The induction variable should carry the scaling factor: 1.
-; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK: [[IVNEXT]] = add nuw i64 [[IV]], 1
   %indvars.iv.next = add i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds double* %c, i64 %indvars.iv.next
   %tmp2 = load double* %arrayidx2, align 8