Teach ValueTracking a new way to analyze PHI nodes, and teach

Instcombine to be more aggressive about using SimplifyDemandedBits
on shift nodes. This allows a shift to be simplified to zero in the
included test case.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@72204 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Dan Gohman 2009-05-21 02:28:33 +00:00
parent 4a4ea14f7d
commit 9004c8afd4
3 changed files with 66 additions and 4 deletions

View File

@ -48,8 +48,9 @@ static unsigned getOpcode(const Value *V) {
void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
APInt &KnownZero, APInt &KnownOne,
TargetData *TD, unsigned Depth) {
const unsigned MaxDepth = 6;
assert(V && "No Value?");
assert(Depth <= 6 && "Limit Search Depth");
assert(Depth <= MaxDepth && "Limit Search Depth");
unsigned BitWidth = Mask.getBitWidth();
assert((V->getType()->isInteger() || isa<PointerType>(V->getType())) &&
"Not integer or pointer type!");
@ -88,7 +89,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
KnownZero.clear(); KnownOne.clear(); // Start out not knowing anything.
if (Depth == 6 || Mask == 0)
if (Depth == MaxDepth || Mask == 0)
return; // Limit search depth.
User *I = dyn_cast<User>(V);
@ -522,6 +523,30 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
}
}
}
// Otherwise take the unions of the known bit sets of the operands,
// taking conservative care to avoid excessive recursion.
if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
KnownZero = APInt::getAllOnesValue(BitWidth);
KnownOne = APInt::getAllOnesValue(BitWidth);
for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
// Skip direct self references.
if (P->getIncomingValue(i) == P) continue;
KnownZero2 = APInt(BitWidth, 0);
KnownOne2 = APInt(BitWidth, 0);
// Recurse, but cap the recursion to one level, because we don't
// want to waste time spinning around in loops.
ComputeMaskedBits(P->getIncomingValue(i), KnownZero | KnownOne,
KnownZero2, KnownOne2, TD, MaxDepth-1);
KnownZero &= KnownZero2;
KnownOne &= KnownOne2;
// If all bits have been ruled out, there's no need to check
// more operands.
if (!KnownZero && !KnownOne)
break;
}
}
break;
}
case Instruction::Call:

View File

@ -7152,6 +7152,10 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
}
// See if we can fold away this shift.
if (!isa<VectorType>(I.getType()) && SimplifyDemandedInstructionBits(I))
return &I;
// Try to fold constant and into select arguments.
if (isa<Constant>(Op0))
if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
@ -7171,8 +7175,6 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
uint32_t TypeBits = Op0->getType()->getPrimitiveSizeInBits();
if (SimplifyDemandedInstructionBits(I))
return &I;
// shl uint X, 32 = 0 and shr ubyte Y, 9 = 0, ... just don't eliminate shr
// of a signed value.

View File

@ -0,0 +1,35 @@
; RUN: llvm-as < %s | opt -instcombine | llvm-dis > %t
; RUN: not grep lshr %t
; RUN: grep add %t | count 1
; Instcombine should be able to eliminate the lshr, because only
; bits in the operand which might be non-zero will be shifted
; off the end.
; A simple string-hash loop. The hash accumulator %k.04 flows through a PHI
; whose incoming values are 0 (from %entry) and %t8 (from %bb); since %t8 is
; masked with 16383 (only the low 14 bits can be set), the lshr of %k.04 by 14
; always produces 0 — the fold this test checks for.
define i32 @hash_string(i8* nocapture %key) nounwind readonly {
entry:
; Load the first byte; an empty string skips the loop entirely.
%t0 = load i8* %key, align 1 ; <i8> [#uses=1]
%t1 = icmp eq i8 %t0, 0 ; <i1> [#uses=1]
br i1 %t1, label %bb2, label %bb
bb: ; preds = %bb, %entry
; %indvar indexes the current character; %k.04 is the running hash value.
%indvar = phi i64 [ 0, %entry ], [ %tmp, %bb ] ; <i64> [#uses=2]
%k.04 = phi i32 [ 0, %entry ], [ %t8, %bb ] ; <i32> [#uses=2]
%cp.05 = getelementptr i8* %key, i64 %indvar ; <i8*> [#uses=1]
; Hash update: (k << 1) + (k >> 14), then xor in the sign-extended byte.
; %k.04's only nonzero incoming value is %t8, which is masked to 14 bits
; below, so %t3 is provably zero and the lshr should be eliminated.
%t2 = shl i32 %k.04, 1 ; <i32> [#uses=1]
%t3 = lshr i32 %k.04, 14 ; <i32> [#uses=1]
%t4 = add i32 %t2, %t3 ; <i32> [#uses=1]
%t5 = load i8* %cp.05, align 1 ; <i8> [#uses=1]
%t6 = sext i8 %t5 to i32 ; <i32> [#uses=1]
%t7 = xor i32 %t6, %t4 ; <i32> [#uses=1]
; Keep the hash in the low 14 bits (16383 = 0x3FFF).
%t8 = and i32 %t7, 16383 ; <i32> [#uses=2]
%tmp = add i64 %indvar, 1 ; <i64> [#uses=2]
; Load the next character; exit the loop at the NUL terminator.
%scevgep = getelementptr i8* %key, i64 %tmp ; <i8*> [#uses=1]
%t9 = load i8* %scevgep, align 1 ; <i8> [#uses=1]
%t10 = icmp eq i8 %t9, 0 ; <i1> [#uses=1]
br i1 %t10, label %bb2, label %bb
bb2: ; preds = %bb, %entry
; Merge the final hash: 0 for the empty string, otherwise the loop result.
%k.0.lcssa = phi i32 [ 0, %entry ], [ %t8, %bb ] ; <i32> [#uses=1]
ret i32 %k.0.lcssa
}