Fix information loss in branch probability computation.

Summary:
This addresses PR 22718. When branch weights are too large, they were
being clamped to the range [1, MaxWeightForBB]. But since the clamping
is applied only to the weights that fall outside that range, it distorts
the relative branch probabilities.
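
To make the distortion concrete, here is a small standalone illustration
(not code from the patch; the two-successor limit UINT32_MAX / 2 mirrors
the old getMaxWeightFor) of how per-edge clamping loses a 4:1 ratio
between two weights:

#include <cstdint>
#include <cstdio>

int main() {
  // Old per-edge limit for a block with two successors.
  const uint64_t Limit = UINT32_MAX / 2;

  // Profiled weights with a true 4:1 ratio.
  uint64_t W0 = 0xF0000000u; // 4026531840
  uint64_t W1 = 0x3C000000u; // 1006632960

  // Old behavior: clamp each weight independently to [1, Limit].
  uint64_t C0 = W0 > Limit ? Limit : W0; // 2147483647 (clamped)
  uint64_t C1 = W1 > Limit ? Limit : W1; // 1006632960 (unchanged)

  // The clamped ratio is ~2.13:1 instead of 4:1.
  std::printf("%llu:%llu\n", (unsigned long long)C0, (unsigned long long)C1);
  return 0;
}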

This patch changes the weight calculation to scale every branch
uniformly, so the relative branch probabilities are preserved. The
scaling works as follows: first, all the branch weights are added up,
and if the sum does not fit in 32 bits, an integer scaling factor is
computed to bring every weight back within the range.
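
As a rough sketch of that idea (simplified and standalone; scaleWeights
is a hypothetical helper, not part of the patch, which does this inline
in BranchProbabilityInfo):

#include <cassert>
#include <cstdint>
#include <vector>

// Scale weights down uniformly so their sum fits in 32 bits, preserving
// the relative ratios (up to integer-division rounding).
std::vector<uint32_t> scaleWeights(const std::vector<uint32_t> &Weights) {
  uint64_t WeightSum = 0;
  for (uint32_t W : Weights)
    WeightSum += W;

  // Integer scale that brings the sum back under 32 bits; 1 when the
  // sum already fits, so small weights pass through unchanged.
  uint64_t ScalingFactor =
      (WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1;

  std::vector<uint32_t> Scaled;
  Scaled.reserve(Weights.size());
  WeightSum = 0;
  for (uint32_t W : Weights) {
    uint32_t S = W / ScalingFactor;
    WeightSum += S;
    Scaled.push_back(S);
  }
  assert(WeightSum <= UINT32_MAX && "Expected weights to scale down to 32 bits");
  return Scaled;
}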

The patch also fixes an existing test whose expected branch
probabilities were slightly off because of the previous clamping; the
test now checks for branch weights that are scaled accordingly.

Reviewers: dexonsmith

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D9442

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@236750 91177308-0d34-0410-b5e6-96231b3b80d8
commit aa46024ea3 (parent 8359916759)
Author: Diego Novillo
Date:   2015-05-07 17:22:06 +0000

3 files changed, 111 insertions(+), 14 deletions(-)


@@ -115,11 +115,6 @@ static const uint32_t NORMAL_WEIGHT = 16;
 // Minimum weight of an edge. Please note, that weight is NEVER 0.
 static const uint32_t MIN_WEIGHT = 1;
-
-static uint32_t getMaxWeightFor(BasicBlock *BB) {
-  return UINT32_MAX / BB->getTerminator()->getNumSuccessors();
-}
 
 /// \brief Calculate edge weights for successors lead to unreachable.
 ///
 /// Predict that a successor which leads necessarily to an
@@ -185,15 +180,18 @@ bool BranchProbabilityInfo::calcMetadataWeights(BasicBlock *BB) {
   if (!WeightsNode)
     return false;
 
+  // Check that the number of successors is manageable.
+  assert(TI->getNumSuccessors() < UINT32_MAX && "Too many successors");
+
   // Ensure there are weights for all of the successors. Note that the first
   // operand to the metadata node is a name, not a weight.
   if (WeightsNode->getNumOperands() != TI->getNumSuccessors() + 1)
     return false;
 
-  // Build up the final weights that will be used in a temporary buffer, but
-  // don't add them until all weights are present. Each weight value is clamped
-  // to [1, getMaxWeightFor(BB)].
-  uint32_t WeightLimit = getMaxWeightFor(BB);
+  // Build up the final weights that will be used in a temporary buffer.
+  // Compute the sum of all weights to later decide whether they need to
+  // be scaled to fit in 32 bits.
+  uint64_t WeightSum = 0;
   SmallVector<uint32_t, 2> Weights;
   Weights.reserve(TI->getNumSuccessors());
   for (unsigned i = 1, e = WeightsNode->getNumOperands(); i != e; ++i) {
@@ -201,11 +199,26 @@ bool BranchProbabilityInfo::calcMetadataWeights(BasicBlock *BB) {
         mdconst::dyn_extract<ConstantInt>(WeightsNode->getOperand(i));
     if (!Weight)
       return false;
-    Weights.push_back(Weight->getLimitedValue(WeightLimit));
+    assert(Weight->getValue().getActiveBits() <= 32 &&
+           "Too many bits for uint32_t");
+    Weights.push_back(Weight->getZExtValue());
+    WeightSum += Weights.back();
   }
   assert(Weights.size() == TI->getNumSuccessors() && "Checked above");
-  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
-    setEdgeWeight(BB, i, Weights[i]);
+
+  // If the sum of weights does not fit in 32 bits, scale every weight down
+  // accordingly.
+  uint64_t ScalingFactor =
+      (WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1;
+
+  WeightSum = 0;
+  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
+    uint32_t W = Weights[i] / ScalingFactor;
+    WeightSum += W;
+    setEdgeWeight(BB, i, W);
+  }
+  assert(WeightSum <= UINT32_MAX &&
+         "Expected weights to scale down to 32 bits");
 
   return true;
 }
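
For concreteness, a tiny driver (hypothetical, reusing the scaleWeights
sketch from above) showing that the 4:1 ratio from the earlier clamping
example now survives:

#include <cstdio>
// Assumes the scaleWeights() sketch above is in scope.

int main() {
  // Sum is 5033164800, which overflows 32 bits, so the scaling factor is 2.
  std::vector<uint32_t> Scaled = scaleWeights({0xF0000000u, 0x3C000000u});

  // Prints 2013265920:503316480 -- still exactly 4:1.
  std::printf("%u:%u\n", Scaled[0], Scaled[1]);
  return 0;
}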