Partially revert r210444 due to performance regression

Summary:
Converting outermost zext(a) to sext(a) causes worse code when the
computation of zext(a) could be reused. For example, after converting

... = array[zext(a)]
... = array[zext(a) + 1]

to

... = array[sext(a)]
... = array[zext(a) + 1],

the program computes sext(a) in addition to zext(a), which the second
access still needs, so the conversion adds a computation instead of
saving one. I added one test in split-gep-and-gvn.ll to illustrate
this scenario.
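
A rough sketch of the scenario in LLVM IR (hypothetical names such as
@array and %a; the real case is the test added to
split-gep-and-gvn.ll): before the transformation, both accesses share
a single zext:

  %i = zext i32 %a to i64
  %p0 = getelementptr inbounds [32 x float]* @array, i64 0, i64 %i
  %v0 = load float* %p0
  %j = add i64 %i, 1
  %p1 = getelementptr inbounds [32 x float]* @array, i64 0, i64 %j
  %v1 = load float* %p1

Converting the first index to sext rewrites %p0 to use sext i32 %a to
i64, but %p1 still reaches the original zext through %j, so both
extensions remain live.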

Also, with r211281 and r211084, we attach "nuw" flags to more
computations involving CUDA intrinsics such as threadIdx.x. These
annotations help a lot with splitting GEPs, so the benefit of the
optimization reverted here was only marginal.
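
As a hedged illustration (not code from this patch), once the add
below carries "nuw", the zext can be distributed over it, so the
constant offset is extracted from the GEP index without any
zext-to-sext conversion:

  %tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
  ; nuw (from the known range of threadIdx.x) guarantees that
  ; zext(%tid + 5) == zext(%tid) + 5, so the index splits into the
  ; variadic part zext(%tid) and the constant offset 5.
  %sum = add nuw nsw i32 %tid, 5
  %idx = zext i32 %sum to i64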

Test Plan: make check-all

Reviewers: eliben, meheff

Reviewed By: meheff

Subscribers: jholewinski, llvm-commits

Differential Revision: http://reviews.llvm.org/D4542

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@213209 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Jingyue Wu
Date: 2014-07-16 23:25:00 +00:00
parent 07b294a25b
commit 1d56cda023
3 changed files with 60 additions and 86 deletions

@@ -272,23 +272,6 @@ class SeparateConstOffsetFromGEP : public FunctionPass {
   ///
   /// Verified in @i32_add in split-gep.ll
   bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);
-  /// For each array index that is in the form of zext(a), convert it to sext(a)
-  /// if we can prove zext(a) <= max signed value of typeof(a). We prefer
-  /// sext(a) to zext(a), because in the special case where x + y >= 0 and
-  /// (x >= 0 or y >= 0), function CanTraceInto can split sext(x + y),
-  /// while no such case exists for zext(x + y).
-  ///
-  /// Note that
-  ///   zext(x + y) = zext(x) + zext(y)
-  /// is wrong, e.g.,
-  ///   zext i32(UINT_MAX + 1) to i64 !=
-  ///   (zext i32 UINT_MAX to i64) + (zext i32 1 to i64)
-  ///
-  /// Returns true if the module changes.
-  ///
-  /// Verified in @inbounds_zext_add in split-gep.ll and @sum_of_array3 in
-  /// split-gep-and-gvn.ll
-  bool convertInBoundsZExtToSExt(GetElementPtrInst *GEP);
 
   const DataLayout *DL;
 };
@@ -613,43 +596,6 @@ bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
   return Changed;
 }
 
-bool
-SeparateConstOffsetFromGEP::convertInBoundsZExtToSExt(GetElementPtrInst *GEP) {
-  if (!GEP->isInBounds())
-    return false;
-
-  // TODO: consider alloca
-  GlobalVariable *UnderlyingObject =
-      dyn_cast<GlobalVariable>(GEP->getPointerOperand());
-  if (UnderlyingObject == nullptr)
-    return false;
-
-  uint64_t ObjectSize =
-      DL->getTypeAllocSize(UnderlyingObject->getType()->getElementType());
-  gep_type_iterator GTI = gep_type_begin(*GEP);
-  bool Changed = false;
-  for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end(); I != E;
-       ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
-      if (ZExtInst *Extended = dyn_cast<ZExtInst>(*I)) {
-        unsigned SrcBitWidth =
-            cast<IntegerType>(Extended->getSrcTy())->getBitWidth();
-        // For GEP operand zext(a), if a <= max signed value of typeof(a), then
-        // the sign bit of a is zero and sext(a) = zext(a). Because the GEP is
-        // in bounds, we know a <= ObjectSize, so the condition can be reduced
-        // to ObjectSize <= max signed value of typeof(a).
-        if (ObjectSize <=
-            APInt::getSignedMaxValue(SrcBitWidth).getZExtValue()) {
-          *I = new SExtInst(Extended->getOperand(0), Extended->getType(),
-                            Extended->getName(), GEP);
-          Changed = true;
-        }
-      }
-    }
-  }
-  return Changed;
-}
-
 int64_t
 SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
                                                  bool &NeedsExtraction) {
@@ -684,9 +630,7 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
   if (GEP->hasAllConstantIndices())
     return false;
 
-  bool Changed = false;
-  Changed |= canonicalizeArrayIndicesToPointerSize(GEP);
-  Changed |= convertInBoundsZExtToSExt(GEP);
+  bool Changed = canonicalizeArrayIndicesToPointerSize(GEP);
 
   bool NeedsExtraction;
   int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);