Teach CodeGen's version of computeMaskedBits to understand the range metadata.

This is the CodeGen equivalent of r153747. I tested that there is no noticeable
performance difference with any combination of -O0/-O2/-g when compiling
gcc as a single compilation unit.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@153817 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Rafael Espindola
2012-03-31 18:14:00 +00:00
parent 5b00ceaeea
commit 95d594cac3
11 changed files with 54 additions and 19 deletions

View File

@@ -197,8 +197,8 @@ static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
KnownOne.setBit(BitWidth - 1);
}
static void computeMaskedBitsLoad(const MDNode &Ranges, const APInt &Mask,
APInt &KnownZero) {
void llvm::computeMaskedBitsLoad(const MDNode &Ranges, const APInt &Mask,
APInt &KnownZero) {
unsigned BitWidth = Mask.getBitWidth();
unsigned NumRanges = Ranges.getNumOperands() / 2;
assert(NumRanges >= 1);