Revert "blockfreq: Rewrite BlockFrequencyInfoImpl" (#2)

This reverts commit r206666, as planned.

Still stumped on why the bots are failing.  Sanitizer bots haven't
turned anything up.  If anyone can help me debug either of the failures
(referenced in r206666) I'll owe them a beer.  (In the meantime, I'll be
auditing my patch for undefined behaviour.)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206677 91177308-0d34-0410-b5e6-96231b3b80d8
Duncan P. N. Exon Smith 2014-04-19 00:42:46 +00:00
parent 55c1e1bd26
commit 2033057de8
12 changed files with 358 additions and 3145 deletions

File diff suppressed because it is too large.


@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "block-freq"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
@@ -107,7 +106,6 @@ struct DOTGraphTraits<BlockFrequencyInfo*> : public DefaultDOTGraphTraits {
INITIALIZE_PASS_BEGIN(BlockFrequencyInfo, "block-freq",
"Block Frequency Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_END(BlockFrequencyInfo, "block-freq",
"Block Frequency Analysis", true, true)
@@ -122,16 +120,14 @@ BlockFrequencyInfo::~BlockFrequencyInfo() {}
void BlockFrequencyInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<BranchProbabilityInfo>();
AU.addRequired<LoopInfo>();
AU.setPreservesAll();
}
bool BlockFrequencyInfo::runOnFunction(Function &F) {
BranchProbabilityInfo &BPI = getAnalysis<BranchProbabilityInfo>();
LoopInfo &LI = getAnalysis<LoopInfo>();
if (!BFI)
BFI.reset(new ImplType);
BFI->doFunction(&F, &BPI, &LI);
BFI->doFunction(&F, &BPI);
#ifndef NDEBUG
if (ViewBlockFreqPropagationDAG != GVDT_None)
view();
@@ -162,7 +158,7 @@ void BlockFrequencyInfo::view() const {
}
const Function *BlockFrequencyInfo::getFunction() const {
return BFI ? BFI->getFunction() : nullptr;
return BFI ? BFI->Fn : nullptr;
}
raw_ostream &BlockFrequencyInfo::


@@ -1,933 +0,0 @@
//===- BlockFrequencyInfoImpl.cpp - Block Frequency Info Implementation ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "block-freq"
#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/Support/raw_ostream.h"
#include <deque>
using namespace llvm;
//===----------------------------------------------------------------------===//
//
// PositiveFloat implementation.
//
//===----------------------------------------------------------------------===//
#ifndef _MSC_VER
const int PositiveFloatBase::MaxExponent;
const int PositiveFloatBase::MinExponent;
#endif
static void appendDigit(std::string &Str, unsigned D) {
assert(D < 10);
Str += '0' + D % 10;
}
static void appendNumber(std::string &Str, uint64_t N) {
while (N) {
appendDigit(Str, N % 10);
N /= 10;
}
}
static bool doesRoundUp(char Digit) {
switch (Digit) {
case '5':
case '6':
case '7':
case '8':
case '9':
return true;
default:
return false;
}
}
static std::string toStringAPFloat(uint64_t D, int E, unsigned Precision) {
assert(E >= PositiveFloatBase::MinExponent);
assert(E <= PositiveFloatBase::MaxExponent);
// Find a new E, but don't let it increase past MaxExponent.
int LeadingZeros = PositiveFloatBase::countLeadingZeros64(D);
int NewE = std::min(PositiveFloatBase::MaxExponent, E + 63 - LeadingZeros);
int Shift = 63 - (NewE - E);
assert(Shift <= LeadingZeros);
assert(Shift == LeadingZeros || NewE == PositiveFloatBase::MaxExponent);
D <<= Shift;
E = NewE;
// Check for a denormal.
unsigned AdjustedE = E + 16383;
if (!(D >> 63)) {
assert(E == PositiveFloatBase::MaxExponent);
AdjustedE = 0;
}
// Build the float and print it.
uint64_t RawBits[2] = {D, AdjustedE};
APFloat Float(APFloat::x87DoubleExtended, APInt(80, RawBits));
SmallVector<char, 24> Chars;
Float.toString(Chars, Precision, 0);
return std::string(Chars.begin(), Chars.end());
}
static std::string stripTrailingZeros(std::string Float) {
size_t NonZero = Float.find_last_not_of('0');
assert(NonZero != std::string::npos && "no . in floating point string");
if (Float[NonZero] == '.')
++NonZero;
return Float.substr(0, NonZero + 1);
}
std::string PositiveFloatBase::toString(uint64_t D, int16_t E, int Width,
unsigned Precision) {
if (!D)
return "0.0";
// Canonicalize exponent and digits.
uint64_t Above0 = 0;
uint64_t Below0 = 0;
uint64_t Extra = 0;
int ExtraShift = 0;
if (E == 0) {
Above0 = D;
} else if (E > 0) {
if (int Shift = std::min(int16_t(countLeadingZeros64(D)), E)) {
D <<= Shift;
E -= Shift;
if (!E)
Above0 = D;
}
} else if (E > -64) {
Above0 = D >> -E;
Below0 = D << (64 + E);
} else if (E > -120) {
Below0 = D >> (-E - 64);
Extra = D << (128 + E);
ExtraShift = -64 - E;
}
// Fall back on APFloat for very small and very large numbers.
if (!Above0 && !Below0)
return toStringAPFloat(D, E, Precision);
// Append the digits before the decimal.
std::string Str;
size_t DigitsOut = 0;
if (Above0) {
appendNumber(Str, Above0);
DigitsOut = Str.size();
} else
appendDigit(Str, 0);
std::reverse(Str.begin(), Str.end());
// Return early if there's nothing after the decimal.
if (!Below0)
return Str + ".0";
// Append the decimal and beyond.
Str += '.';
uint64_t Error = UINT64_C(1) << (64 - Width);
// We need to shift Below0 to the right to make space for calculating
// digits. Save the precision we're losing in Extra.
Extra = (Below0 & 0xf) << 56 | (Extra >> 8);
Below0 >>= 4;
size_t SinceDot = 0;
size_t AfterDot = Str.size();
do {
if (ExtraShift) {
--ExtraShift;
Error *= 5;
} else
Error *= 10;
Below0 *= 10;
Extra *= 10;
Below0 += (Extra >> 60);
Extra = Extra & (UINT64_MAX >> 4);
appendDigit(Str, Below0 >> 60);
Below0 = Below0 & (UINT64_MAX >> 4);
if (DigitsOut || Str.back() != '0')
++DigitsOut;
++SinceDot;
} while (Error && (Below0 << 4 | Extra >> 60) >= Error / 2 &&
(!Precision || DigitsOut <= Precision || SinceDot < 2));
// Return early for maximum precision.
if (!Precision || DigitsOut <= Precision)
return stripTrailingZeros(Str);
// Find where to truncate.
size_t Truncate =
std::max(Str.size() - (DigitsOut - Precision), AfterDot + 1);
// Check if there's anything to truncate.
if (Truncate >= Str.size())
return stripTrailingZeros(Str);
bool Carry = doesRoundUp(Str[Truncate]);
if (!Carry)
return stripTrailingZeros(Str.substr(0, Truncate));
// Round with the first truncated digit.
for (std::string::reverse_iterator I(Str.begin() + Truncate), E = Str.rend();
I != E; ++I) {
if (*I == '.')
continue;
if (*I == '9') {
*I = '0';
continue;
}
++*I;
Carry = false;
break;
}
// Add "1" in front if we still need to carry.
return stripTrailingZeros(std::string(Carry, '1') + Str.substr(0, Truncate));
}
raw_ostream &PositiveFloatBase::print(raw_ostream &OS, uint64_t D, int16_t E,
int Width, unsigned Precision) {
return OS << toString(D, E, Width, Precision);
}
void PositiveFloatBase::dump(uint64_t D, int16_t E, int Width) {
print(dbgs(), D, E, Width, 0) << "[" << Width << ":" << D << "*2^" << E
<< "]";
}
static std::pair<uint64_t, int16_t>
getRoundedFloat(uint64_t N, bool ShouldRound, int64_t Shift) {
if (ShouldRound)
if (!++N)
// Rounding caused an overflow.
return std::make_pair(UINT64_C(1), Shift + 64);
return std::make_pair(N, Shift);
}
std::pair<uint64_t, int16_t> PositiveFloatBase::divide64(uint64_t Dividend,
uint64_t Divisor) {
// Input should be sanitized.
assert(Divisor);
assert(Dividend);
// Minimize size of divisor.
int16_t Shift = 0;
if (int Zeros = countTrailingZeros(Divisor)) {
Shift -= Zeros;
Divisor >>= Zeros;
}
// Check for powers of two.
if (Divisor == 1)
return std::make_pair(Dividend, Shift);
// Maximize size of dividend.
if (int Zeros = countLeadingZeros64(Dividend)) {
Shift -= Zeros;
Dividend <<= Zeros;
}
// Start with the result of a divide.
uint64_t Quotient = Dividend / Divisor;
Dividend %= Divisor;
// Continue building the quotient with long division.
//
// TODO: continue with larger digits.
while (!(Quotient >> 63) && Dividend) {
// Shift Dividend, and check for overflow.
bool IsOverflow = Dividend >> 63;
Dividend <<= 1;
--Shift;
// Divide.
bool DoesDivide = IsOverflow || Divisor <= Dividend;
Quotient = (Quotient << 1) | uint64_t(DoesDivide);
Dividend -= DoesDivide ? Divisor : 0;
}
// Round; getRoundedFloat() increments the quotient and handles the overflow case.
return getRoundedFloat(Quotient, Dividend >= getHalf(Divisor), Shift);
}
static void addWithCarry(uint64_t &Upper, uint64_t &Lower, uint64_t N) {
uint64_t NewLower = Lower + (N << 32);
Upper += (N >> 32) + (NewLower < Lower);
Lower = NewLower;
}
std::pair<uint64_t, int16_t> PositiveFloatBase::multiply64(uint64_t L,
uint64_t R) {
// Separate into two 32-bit digits (U.L).
uint64_t UL = L >> 32, LL = L & UINT32_MAX, UR = R >> 32, LR = R & UINT32_MAX;
// Compute cross products.
uint64_t P1 = UL * UR, P2 = UL * LR, P3 = LL * UR, P4 = LL * LR;
// Sum into two 64-bit digits.
uint64_t Upper = P1, Lower = P4;
addWithCarry(Upper, Lower, P2);
addWithCarry(Upper, Lower, P3);
// Check for the lower 32 bits.
if (!Upper)
return std::make_pair(Lower, 0);
// Shift as little as possible to maximize precision.
unsigned LeadingZeros = countLeadingZeros64(Upper);
int16_t Shift = 64 - LeadingZeros;
if (LeadingZeros)
Upper = Upper << LeadingZeros | Lower >> Shift;
bool ShouldRound = Shift && (Lower & UINT64_C(1) << (Shift - 1));
return getRoundedFloat(Upper, ShouldRound, Shift);
}
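
As a quick sanity check on the 32-bit digit decomposition used by multiply64() above, the following standalone sketch (not part of the diffed file; it assumes a compiler that provides unsigned __int128, and the operand values are arbitrary) rebuilds a 64x64-bit product from the same four cross products and compares it against a native 128-bit multiply:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t L = 0xdeadbeefcafebabeULL, R = 0x123456789abcdef0ULL;
  // Split into 32-bit digits, exactly as multiply64() does.
  uint64_t UL = L >> 32, LL = L & UINT32_MAX, UR = R >> 32, LR = R & UINT32_MAX;
  // L*R == UL*UR*2^64 + (UL*LR + LL*UR)*2^32 + LL*LR.
  unsigned __int128 ViaDigits = ((unsigned __int128)(UL * UR) << 64) +
                                ((unsigned __int128)(UL * LR) << 32) +
                                ((unsigned __int128)(LL * UR) << 32) +
                                (unsigned __int128)(LL * LR);
  unsigned __int128 Direct = (unsigned __int128)L * R;
  assert(ViaDigits == Direct && "cross products must reproduce the product");
  std::printf("upper = 0x%016llx, lower = 0x%016llx\n",
              (unsigned long long)(Direct >> 64),
              (unsigned long long)(Direct & UINT64_MAX));
  return 0;
}
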
//===----------------------------------------------------------------------===//
//
// BlockMass implementation.
//
//===----------------------------------------------------------------------===//
BlockMass &BlockMass::operator*=(const BranchProbability &P) {
uint32_t N = P.getNumerator(), D = P.getDenominator();
assert(D && "divide by 0");
assert(N <= D && "fraction greater than 1");
// Fast path for multiplying by 1.0.
if (!Mass || N == D)
return *this;
// Get as much precision as we can.
int Shift = countLeadingZeros(Mass);
uint64_t ShiftedQuotient = (Mass << Shift) / D;
uint64_t Product = ShiftedQuotient * N >> Shift;
// Now check for what's lost.
uint64_t Left = ShiftedQuotient * (D - N) >> Shift;
uint64_t Lost = Mass - Product - Left;
// TODO: prove this assertion.
assert(Lost <= UINT32_MAX);
// Take the product plus a portion of the spoils.
Mass = Product + Lost * N / D;
return *this;
}
PositiveFloat<uint64_t> BlockMass::toFloat() const {
if (isFull())
return PositiveFloat<uint64_t>(1, 0);
return PositiveFloat<uint64_t>(getMass() + 1, -64);
}
void BlockMass::dump() const { print(dbgs()); }
static char getHexDigit(int N) {
assert(N < 16);
if (N < 10)
return '0' + N;
return 'a' + N - 10;
}
raw_ostream &BlockMass::print(raw_ostream &OS) const {
for (int Digits = 0; Digits < 16; ++Digits)
OS << getHexDigit(Mass >> (60 - Digits * 4) & 0xf);
return OS;
}
//===----------------------------------------------------------------------===//
//
// BlockFrequencyInfoImpl implementation.
//
//===----------------------------------------------------------------------===//
namespace {
typedef BlockFrequencyInfoImplBase::BlockNode BlockNode;
typedef BlockFrequencyInfoImplBase::Distribution Distribution;
typedef BlockFrequencyInfoImplBase::Distribution::WeightList WeightList;
typedef BlockFrequencyInfoImplBase::Float Float;
typedef BlockFrequencyInfoImplBase::PackagedLoopData PackagedLoopData;
typedef BlockFrequencyInfoImplBase::Weight Weight;
typedef BlockFrequencyInfoImplBase::FrequencyData FrequencyData;
/// \brief Dithering mass distributer.
///
/// This class splits up a single mass into portions by weight, dithering to
/// spread out error. No mass is lost. The dithering precision depends on the
/// precision of the product of \a BlockMass and \a BranchProbability.
///
/// The distribution algorithm follows.
///
/// 1. Initialize by saving the sum of the weights in \a RemWeight and the
/// mass to distribute in \a RemMass.
///
/// 2. For each portion:
///
/// 1. Construct a branch probability, P, as the portion's weight divided
/// by the current value of \a RemWeight.
/// 2. Calculate the portion's mass as \a RemMass times P.
/// 3. Update \a RemWeight and \a RemMass at each portion by subtracting
/// the current portion's weight and mass.
///
/// Mass is distributed in two ways: full distribution and forward
/// distribution. The latter ignores backedges, and uses the parallel fields
/// \a RemForwardWeight and \a RemForwardMass.
struct DitheringDistributer {
uint32_t RemWeight;
uint32_t RemForwardWeight;
BlockMass RemMass;
BlockMass RemForwardMass;
DitheringDistributer(Distribution &Dist, const BlockMass &Mass);
BlockMass takeLocalMass(uint32_t Weight) {
(void)takeMass(Weight);
return takeForwardMass(Weight);
}
BlockMass takeExitMass(uint32_t Weight) {
(void)takeForwardMass(Weight);
return takeMass(Weight);
}
BlockMass takeBackedgeMass(uint32_t Weight) { return takeMass(Weight); }
private:
BlockMass takeForwardMass(uint32_t Weight);
BlockMass takeMass(uint32_t Weight);
};
}
DitheringDistributer::DitheringDistributer(Distribution &Dist,
const BlockMass &Mass) {
Dist.normalize();
RemWeight = Dist.Total;
RemForwardWeight = Dist.ForwardTotal;
RemMass = Mass;
RemForwardMass = Dist.ForwardTotal ? Mass : BlockMass();
}
BlockMass DitheringDistributer::takeForwardMass(uint32_t Weight) {
// Compute the amount of mass to take.
assert(Weight && "invalid weight");
assert(Weight <= RemForwardWeight);
BlockMass Mass = RemForwardMass * BranchProbability(Weight, RemForwardWeight);
// Decrement totals (dither).
RemForwardWeight -= Weight;
RemForwardMass -= Mass;
return Mass;
}
BlockMass DitheringDistributer::takeMass(uint32_t Weight) {
assert(Weight && "invalid weight");
assert(Weight <= RemWeight);
BlockMass Mass = RemMass * BranchProbability(Weight, RemWeight);
// Decrement totals (dither).
RemWeight -= Weight;
RemMass -= Mass;
return Mass;
}
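
To make the dithering scheme described above DitheringDistributer concrete, here is a minimal standalone sketch. Plain 64-bit integers stand in for BlockMass and BranchProbability, and the mass and weights are made-up numbers; each portion takes floor(RemMass * Weight / RemWeight), and both remainders shrink by what was taken, so rounding error is carried forward rather than dropped:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const std::vector<uint64_t> Weights = {1, 1, 1};
  uint64_t RemWeight = 0;
  for (uint64_t W : Weights)
    RemWeight += W;

  uint64_t RemMass = 100; // stand-in for the BlockMass being distributed
  for (uint64_t W : Weights) {
    // Equivalent of RemMass * BranchProbability(W, RemWeight), with plain
    // integer division standing in for the dithered BlockMass multiply.
    uint64_t Portion = RemMass * W / RemWeight;
    RemWeight -= W;
    RemMass -= Portion;
    std::printf("weight %llu takes %llu\n", (unsigned long long)W,
                (unsigned long long)Portion);
  }
  // The final portion always absorbs whatever mass remains (RemMass == 0).
  return 0;
}

With these numbers the three portions come out as 33, 33 and 34; the unevenness is the dithering, and the total still adds up to the original 100.
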
void Distribution::add(const BlockNode &Node, uint64_t Amount,
Weight::DistType Type) {
assert(Amount && "invalid weight of 0");
uint64_t NewTotal = Total + Amount;
// Check for overflow. It should be impossible to overflow twice.
bool IsOverflow = NewTotal < Total;
assert(!(DidOverflow && IsOverflow) && "unexpected repeated overflow");
DidOverflow |= IsOverflow;
// Update the total.
Total = NewTotal;
// Save the weight.
Weight W;
W.TargetNode = Node;
W.Amount = Amount;
W.Type = Type;
Weights.push_back(W);
if (Type == Weight::Backedge)
return;
// Update forward total. Don't worry about overflow here, since then Total
// will exceed 32-bits and they'll both be recomputed in normalize().
ForwardTotal += Amount;
}
static void combineWeight(Weight &W, const Weight &OtherW) {
assert(OtherW.TargetNode.isValid());
if (!W.Amount) {
W = OtherW;
return;
}
assert(W.Type == OtherW.Type);
assert(W.TargetNode == OtherW.TargetNode);
assert(W.Amount < W.Amount + OtherW.Amount);
W.Amount += OtherW.Amount;
}
static void combineWeightsBySorting(WeightList &Weights) {
// Sort so edges to the same node are adjacent.
std::sort(Weights.begin(), Weights.end(),
[](const Weight &L,
const Weight &R) { return L.TargetNode < R.TargetNode; });
// Combine adjacent edges.
WeightList::iterator O = Weights.begin();
for (WeightList::const_iterator I = O, L = O, E = Weights.end(); I != E;
++O, (I = L)) {
*O = *I;
// Find the adjacent weights to the same node.
for (++L; L != E && I->TargetNode == L->TargetNode; ++L)
combineWeight(*O, *L);
}
// Erase extra entries.
Weights.erase(O, Weights.end());
return;
}
static void combineWeightsByHashing(WeightList &Weights) {
// Collect weights into a DenseMap.
typedef DenseMap<BlockNode::IndexType, Weight> HashTable;
HashTable Combined(NextPowerOf2(2 * Weights.size()));
for (const Weight &W : Weights)
combineWeight(Combined[W.TargetNode.Index], W);
// Check whether anything changed.
if (Weights.size() == Combined.size())
return;
// Fill in the new weights.
Weights.clear();
Weights.reserve(Combined.size());
for (const auto &I : Combined)
Weights.push_back(I.second);
}
static void combineWeights(WeightList &Weights) {
// Use a hash table for many successors to keep this linear.
if (Weights.size() > 128) {
combineWeightsByHashing(Weights);
return;
}
combineWeightsBySorting(Weights);
}
static uint64_t shiftRightAndRound(uint64_t N, int Shift) {
assert(Shift >= 0);
assert(Shift < 64);
if (!Shift)
return N;
return (N >> Shift) + (UINT64_C(1) & N >> (Shift - 1));
}
void Distribution::normalize() {
// Early exit for termination nodes.
if (Weights.empty())
return;
// Only bother if there are multiple successors.
if (Weights.size() > 1)
combineWeights(Weights);
// Early exit when combined into a single successor.
if (Weights.size() == 1) {
Total = 1;
ForwardTotal = Weights.front().Type != Weight::Backedge;
Weights.front().Amount = 1;
return;
}
// Determine how much to shift right so that the total fits into 32-bits.
//
// If we shift at all, shift by 1 extra. Otherwise, the lower limit of 1
// for each weight can cause a 32-bit overflow.
int Shift = 0;
if (DidOverflow)
Shift = 33;
else if (Total > UINT32_MAX)
Shift = 33 - countLeadingZeros(Total);
// Early exit if nothing needs to be scaled.
if (!Shift)
return;
// Recompute the total through accumulation (rather than shifting it) so that
// it's accurate after shifting. ForwardTotal is dirty here anyway.
Total = 0;
ForwardTotal = 0;
// Sum the weights to each node and shift right if necessary.
for (Weight &W : Weights) {
// Scale down below UINT32_MAX. Since Shift is larger than necessary, we
// can round here without concern about overflow.
assert(W.TargetNode.isValid());
W.Amount = std::max(UINT64_C(1), shiftRightAndRound(W.Amount, Shift));
assert(W.Amount <= UINT32_MAX);
// Update the total.
Total += W.Amount;
if (W.Type == Weight::Backedge)
continue;
// Update the forward total.
ForwardTotal += W.Amount;
}
assert(Total <= UINT32_MAX);
}
void BlockFrequencyInfoImplBase::clear() {
*this = BlockFrequencyInfoImplBase();
}
/// \brief Clear all memory not needed downstream.
///
/// Releases all memory not used downstream. In particular, saves Freqs.
static void cleanup(BlockFrequencyInfoImplBase &BFI) {
std::vector<FrequencyData> SavedFreqs(std::move(BFI.Freqs));
BFI.clear();
BFI.Freqs = std::move(SavedFreqs);
}
/// \brief Get a possibly packaged node.
///
/// Get the node currently representing Node, which could be a containing
/// loop.
///
/// This function should only be called when distributing mass. As long as
/// there are no irreducible edges to Node, it will have complexity O(1)
/// in this context.
///
/// In general, the complexity is O(L), where L is the number of loop headers
/// Node has been packaged into. Since this method is called in the context
/// of distributing mass, L will be the number of loop headers an early exit
/// edge jumps out of.
static BlockNode getPackagedNode(const BlockFrequencyInfoImplBase &BFI,
const BlockNode &Node) {
assert(Node.isValid());
if (!BFI.Working[Node.Index].IsPackaged)
return Node;
if (!BFI.Working[Node.Index].ContainingLoop.isValid())
return Node;
return getPackagedNode(BFI, BFI.Working[Node.Index].ContainingLoop);
}
/// \brief Get the appropriate mass for a possible pseudo-node loop package.
///
/// Get appropriate mass for Node. If Node is a loop-header (whose loop has
/// been packaged), returns the mass of its pseudo-node. If it's a node inside
/// a packaged loop, it returns the loop's pseudo-node.
static BlockMass &getPackageMass(BlockFrequencyInfoImplBase &BFI,
const BlockNode &Node) {
assert(Node.isValid());
assert(!BFI.Working[Node.Index].IsPackaged);
if (!BFI.Working[Node.Index].IsAPackage)
return BFI.Working[Node.Index].Mass;
return BFI.getLoopPackage(Node).Mass;
}
void BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
const BlockNode &LoopHead,
const BlockNode &Pred,
const BlockNode &Succ,
uint64_t Weight) {
if (!Weight)
Weight = 1;
#ifndef NDEBUG
auto debugSuccessor = [&](const char *Type, const BlockNode &Resolved) {
dbgs() << " =>"
<< " [" << Type << "] weight = " << Weight;
if (Succ != LoopHead)
dbgs() << ", succ = " << getBlockName(Succ);
if (Resolved != Succ)
dbgs() << ", resolved = " << getBlockName(Resolved);
dbgs() << "\n";
};
(void)debugSuccessor;
#endif
if (Succ == LoopHead) {
DEBUG(debugSuccessor("backedge", Succ));
Dist.addBackedge(LoopHead, Weight);
return;
}
BlockNode Resolved = getPackagedNode(*this, Succ);
assert(Resolved != LoopHead);
if (Working[Resolved.Index].ContainingLoop != LoopHead) {
DEBUG(debugSuccessor(" exit ", Resolved));
Dist.addExit(Resolved, Weight);
return;
}
if (!LoopHead.isValid() && Resolved < Pred) {
// Irreducible backedge. Skip this edge in the distribution.
DEBUG(debugSuccessor("skipped ", Resolved));
return;
}
DEBUG(debugSuccessor(" local ", Resolved));
Dist.addLocal(Resolved, Weight);
}
void BlockFrequencyInfoImplBase::addLoopSuccessorsToDist(
const BlockNode &LoopHead, const BlockNode &LocalLoopHead,
Distribution &Dist) {
PackagedLoopData &LoopPackage = getLoopPackage(LocalLoopHead);
const PackagedLoopData::ExitMap &Exits = LoopPackage.Exits;
// Copy the exit map into Dist.
for (const auto &I : Exits)
addToDist(Dist, LoopHead, LocalLoopHead, I.first, I.second.getMass());
// We don't need this map any more. Clear it to prevent quadratic memory
// usage in deeply nested loops with irreducible control flow.
LoopPackage.Exits.clear();
}
/// \brief Get the maximum allowed loop scale.
///
/// Gives the maximum number of estimated iterations allowed for a loop.
/// Downstream users have trouble with very large numbers (even within
/// 64-bits). Perhaps they can be changed to use PositiveFloat.
///
/// TODO: change downstream users so that this can be increased or removed.
static Float getMaxLoopScale() { return Float(1, 12); }
/// \brief Compute the loop scale for a loop.
void BlockFrequencyInfoImplBase::computeLoopScale(const BlockNode &LoopHead) {
// Compute loop scale.
DEBUG(dbgs() << "compute-loop-scale: " << getBlockName(LoopHead) << "\n");
// LoopScale == 1 / ExitMass
// ExitMass == HeadMass - BackedgeMass
PackagedLoopData &LoopPackage = getLoopPackage(LoopHead);
BlockMass ExitMass = BlockMass::getFull() - LoopPackage.BackedgeMass;
// Block scale stores the inverse of the scale.
LoopPackage.Scale = ExitMass.toFloat().inverse();
DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull()
<< " - " << LoopPackage.BackedgeMass << ")\n"
<< " - scale = " << LoopPackage.Scale << "\n");
if (LoopPackage.Scale > getMaxLoopScale()) {
LoopPackage.Scale = getMaxLoopScale();
DEBUG(dbgs() << " - reduced-to-max-scale: " << getMaxLoopScale() << "\n");
}
}
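
For a concrete feel for the formula in computeLoopScale(), here is a toy calculation with made-up numbers, using doubles in place of BlockMass and PositiveFloat: if 3/4 of the header's mass returns along the backedge, only 1/4 exits per trip around the loop, so the loop scale (the expected iteration count) is 4. The cap from getMaxLoopScale() is Float(1, 12), i.e. 4096, which is the limit exercised by the infinite-loop test further down in this commit:

#include <algorithm>
#include <cstdio>

int main() {
  double HeadMass = 1.0;      // BlockMass::getFull()
  double BackedgeMass = 0.75; // mass returning to the loop header
  double ExitMass = HeadMass - BackedgeMass; // 0.25
  double Scale = 1.0 / ExitMass;             // LoopScale == 1 / ExitMass == 4
  double MaxLoopScale = 4096.0;              // getMaxLoopScale() == Float(1, 12)
  Scale = std::min(Scale, MaxLoopScale);
  std::printf("expected iterations (loop scale) = %g\n", Scale);
  return 0;
}
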
/// \brief Package up a loop.
void BlockFrequencyInfoImplBase::packageLoop(const BlockNode &LoopHead) {
DEBUG(dbgs() << "packaging-loop: " << getBlockName(LoopHead) << "\n");
Working[LoopHead.Index].IsAPackage = true;
for (const BlockNode &M : getLoopPackage(LoopHead).Members) {
DEBUG(dbgs() << " - node: " << getBlockName(M.Index) << "\n");
Working[M.Index].IsPackaged = true;
}
}
void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
const BlockNode &LoopHead,
Distribution &Dist) {
BlockMass Mass = getPackageMass(*this, Source);
DEBUG(dbgs() << " => mass: " << Mass
<< " ( general | forward )\n");
// Distribute mass to successors as laid out in Dist.
DitheringDistributer D(Dist, Mass);
#ifndef NDEBUG
auto debugAssign = [&](const BlockNode &T, const BlockMass &M,
const char *Desc) {
dbgs() << " => assign " << M << " (" << D.RemMass << "|"
<< D.RemForwardMass << ")";
if (Desc)
dbgs() << " [" << Desc << "]";
if (T.isValid())
dbgs() << " to " << getBlockName(T);
dbgs() << "\n";
};
(void)debugAssign;
#endif
PackagedLoopData *LoopPackage = 0;
if (LoopHead.isValid())
LoopPackage = &getLoopPackage(LoopHead);
for (const Weight &W : Dist.Weights) {
// Check for a local edge (forward and non-exit).
if (W.Type == Weight::Local) {
BlockMass Local = D.takeLocalMass(W.Amount);
getPackageMass(*this, W.TargetNode) += Local;
DEBUG(debugAssign(W.TargetNode, Local, nullptr));
continue;
}
// Backedges and exits only make sense if we're processing a loop.
assert(LoopPackage && "backedge or exit outside of loop");
// Check for a backedge.
if (W.Type == Weight::Backedge) {
BlockMass Back = D.takeBackedgeMass(W.Amount);
LoopPackage->BackedgeMass += Back;
DEBUG(debugAssign(BlockNode(), Back, "back"));
continue;
}
// This must be an exit.
assert(W.Type == Weight::Exit);
BlockMass Exit = D.takeExitMass(W.Amount);
LoopPackage->Exits.push_back(std::make_pair(W.TargetNode, Exit));
DEBUG(debugAssign(W.TargetNode, Exit, "exit"));
}
}
static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI,
const Float &Min, const Float &Max) {
// Scale the Factor to a size that creates integers. Ideally, integers would
// be scaled so that Max == UINT64_MAX so that they can be best
// differentiated. However, the register allocator currently deals poorly
// with large numbers. Instead, push Min up a little from 1 to give some
// room to differentiate small, unequal numbers.
//
// TODO: fix issues downstream so that ScalingFactor can be Float(1,64)/Max.
Float ScalingFactor = Min.inverse();
if ((Max / Min).lg() < 60)
ScalingFactor <<= 3;
// Translate the floats to integers.
DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max
<< ", factor = " << ScalingFactor << "\n");
for (size_t Index = 0; Index < BFI.Freqs.size(); ++Index) {
Float Scaled = BFI.Freqs[Index].Floating * ScalingFactor;
BFI.Freqs[Index].Integer = std::max(UINT64_C(1), Scaled.toInt<uint64_t>());
DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = "
<< BFI.Freqs[Index].Floating << ", scaled = " << Scaled
<< ", int = " << BFI.Freqs[Index].Integer << "\n");
}
}
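
A toy version of the scaling decision above, with doubles in place of PositiveFloat and made-up block frequencies: when the dynamic range is small enough, the minimum frequency is mapped to 8 instead of 1, so that small, nearly equal frequencies still land on distinct integers:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<double> Freqs = {1.0 / 32, 1.0 / 24, 1.0, 16.0};
  double Min = *std::min_element(Freqs.begin(), Freqs.end());
  double Max = *std::max_element(Freqs.begin(), Freqs.end());
  double ScalingFactor = 1.0 / Min;  // Min.inverse()
  if (std::log2(Max / Min) < 60)     // (Max / Min).lg() < 60
    ScalingFactor *= 8;              // ScalingFactor <<= 3
  for (double F : Freqs) {
    uint64_t Integer = std::max<uint64_t>(1, uint64_t(F * ScalingFactor));
    std::printf("float = %g  ->  int = %llu\n", F, (unsigned long long)Integer);
  }
  return 0;
}

Here 1/32 and 1/24 become 8 and 10; without the extra shift they would both collapse to 1.
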
static void scaleBlockData(BlockFrequencyInfoImplBase &BFI,
const BlockNode &Node,
const PackagedLoopData &Loop) {
Float F = Loop.Mass.toFloat() * Loop.Scale;
Float &Current = BFI.Freqs[Node.Index].Floating;
Float Updated = Current * F;
DEBUG(dbgs() << " - " << BFI.getBlockName(Node) << ": " << Current << " => "
<< Updated << "\n");
Current = Updated;
}
/// \brief Unwrap a loop package.
///
/// Visits all the members of a loop, adjusting their BlockData according to
/// the loop's pseudo-node.
static void unwrapLoopPackage(BlockFrequencyInfoImplBase &BFI,
const BlockNode &Head) {
assert(Head.isValid());
PackagedLoopData &LoopPackage = BFI.getLoopPackage(Head);
DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getBlockName(Head)
<< ": mass = " << LoopPackage.Mass
<< ", scale = " << LoopPackage.Scale << "\n");
scaleBlockData(BFI, Head, LoopPackage);
// Propagate the head scale through the loop. Since members are visited in
// RPO, the head scale will be updated by the loop scale first, and then the
// final head scale will be used to update the rest of the members.
for (const BlockNode &M : LoopPackage.Members) {
const FrequencyData &HeadData = BFI.Freqs[Head.Index];
FrequencyData &Freqs = BFI.Freqs[M.Index];
Float NewFreq = Freqs.Floating * HeadData.Floating;
DEBUG(dbgs() << " - " << BFI.getBlockName(M) << ": " << Freqs.Floating
<< " => " << NewFreq << "\n");
Freqs.Floating = NewFreq;
}
}
void BlockFrequencyInfoImplBase::finalizeMetrics() {
// Set initial frequencies from loop-local masses.
for (size_t Index = 0; Index < Working.size(); ++Index)
Freqs[Index].Floating = Working[Index].Mass.toFloat();
// Unwrap loop packages in reverse post-order, tracking min and max
// frequencies.
auto Min = Float::getLargest();
auto Max = Float::getZero();
for (size_t Index = 0; Index < Working.size(); ++Index) {
if (Working[Index].isLoopHeader())
unwrapLoopPackage(*this, BlockNode(Index));
// Update the min and max scales.
Min = std::min(Min, Freqs[Index].Floating);
Max = std::max(Max, Freqs[Index].Floating);
}
// Convert to integers.
convertFloatingToInteger(*this, Min, Max);
// Clean up data structures.
cleanup(*this);
// Print out the final stats.
DEBUG(dump());
}
BlockFrequency
BlockFrequencyInfoImplBase::getBlockFreq(const BlockNode &Node) const {
if (!Node.isValid())
return 0;
return Freqs[Node.Index].Integer;
}
Float
BlockFrequencyInfoImplBase::getFloatingBlockFreq(const BlockNode &Node) const {
if (!Node.isValid())
return Float::getZero();
return Freqs[Node.Index].Floating;
}
std::string
BlockFrequencyInfoImplBase::getBlockName(const BlockNode &Node) const {
return std::string();
}
raw_ostream &
BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
const BlockNode &Node) const {
return OS << getFloatingBlockFreq(Node);
}
raw_ostream &
BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
const BlockFrequency &Freq) const {
Float Block(Freq.getFrequency(), 0);
Float Entry(getEntryFreq(), 0);
return OS << Block / Entry;
}


@@ -7,7 +7,6 @@ add_llvm_library(LLVMAnalysis
Analysis.cpp
BasicAliasAnalysis.cpp
BlockFrequencyInfo.cpp
BlockFrequencyInfoImpl.cpp
BranchProbabilityInfo.cpp
CFG.cpp
CFGPrinter.cpp


@@ -11,12 +11,9 @@
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "block-freq"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
@@ -115,7 +112,6 @@ struct DOTGraphTraits<MachineBlockFrequencyInfo*> :
INITIALIZE_PASS_BEGIN(MachineBlockFrequencyInfo, "machine-block-freq",
"Machine Block Frequency Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(MachineBlockFrequencyInfo, "machine-block-freq",
"Machine Block Frequency Analysis", true, true)
@@ -131,18 +127,16 @@ MachineBlockFrequencyInfo::~MachineBlockFrequencyInfo() {}
void MachineBlockFrequencyInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineBranchProbabilityInfo>();
AU.addRequired<MachineLoopInfo>();
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
}
bool MachineBlockFrequencyInfo::runOnMachineFunction(MachineFunction &F) {
MachineBranchProbabilityInfo &MBPI =
getAnalysis<MachineBranchProbabilityInfo>();
MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
getAnalysis<MachineBranchProbabilityInfo>();
if (!MBFI)
MBFI.reset(new ImplType);
MBFI->doFunction(&F, &MBPI, &MLI);
MBFI->doFunction(&F, &MBPI);
#ifndef NDEBUG
if (ViewMachineBlockFreqPropagationDAG != GVDT_None) {
view();
@@ -172,7 +166,7 @@ getBlockFreq(const MachineBasicBlock *MBB) const {
}
const MachineFunction *MachineBlockFrequencyInfo::getFunction() const {
return MBFI ? MBFI->getFunction() : nullptr;
return MBFI ? MBFI->Fn : nullptr;
}
raw_ostream &


@@ -1,50 +0,0 @@
; RUN: opt < %s -analyze -block-freq | FileCheck %s
declare void @g(i32 %x)
; CHECK-LABEL: Printing analysis {{.*}} for function 'branch_weight_0':
; CHECK-NEXT: block-frequency-info: branch_weight_0
define void @branch_weight_0(i32 %a) {
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
br label %for.body
; Check that we get 1,4 instead of 0,3.
; CHECK-NEXT: for.body: float = 4.0,
for.body:
%i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
call void @g(i32 %i)
%inc = add i32 %i, 1
%cmp = icmp ugt i32 %inc, %a
br i1 %cmp, label %for.end, label %for.body, !prof !0
; CHECK-NEXT: for.end: float = 1.0, int = [[ENTRY]]
for.end:
ret void
}
!0 = metadata !{metadata !"branch_weights", i32 0, i32 3}
; CHECK-LABEL: Printing analysis {{.*}} for function 'infinite_loop'
; CHECK-NEXT: block-frequency-info: infinite_loop
define void @infinite_loop(i1 %x) {
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
br i1 %x, label %for.body, label %for.end, !prof !1
; Check that the loop scale maxes out at 4096, giving 2048 here.
; CHECK-NEXT: for.body: float = 2048.0,
for.body:
%i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
call void @g(i32 %i)
%inc = add i32 %i, 1
br label %for.body
; Check that the exit weight is half of entry, since half is lost in the
; infinite loop above.
; CHECK-NEXT: for.end: float = 0.5,
for.end:
ret void
}
!1 = metadata !{metadata !"branch_weights", i32 1, i32 1}


@@ -1,14 +1,13 @@
; RUN: opt < %s -analyze -block-freq | FileCheck %s
define i32 @test1(i32 %i, i32* %a) {
; CHECK-LABEL: Printing analysis {{.*}} for function 'test1':
; CHECK-NEXT: block-frequency-info: test1
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
; CHECK: Printing analysis {{.*}} for function 'test1'
; CHECK: entry = 1.0
entry:
br label %body
; Loop backedges are weighted and thus their bodies have a greater frequency.
; CHECK-NEXT: body: float = 32.0,
; CHECK: body = 32.0
body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
@@ -19,29 +18,29 @@ body:
%exitcond = icmp eq i32 %next, %i
br i1 %exitcond, label %exit, label %body
; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
; CHECK: exit = 1.0
exit:
ret i32 %sum
}
define i32 @test2(i32 %i, i32 %a, i32 %b) {
; CHECK-LABEL: Printing analysis {{.*}} for function 'test2':
; CHECK-NEXT: block-frequency-info: test2
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
; CHECK: Printing analysis {{.*}} for function 'test2'
; CHECK: entry = 1.0
entry:
%cond = icmp ult i32 %i, 42
br i1 %cond, label %then, label %else, !prof !0
; The 'then' branch is predicted more likely via branch weight metadata.
; CHECK-NEXT: then: float = 0.9411{{[0-9]*}},
; CHECK: then = 0.94116
then:
br label %exit
; CHECK-NEXT: else: float = 0.05882{{[0-9]*}},
; CHECK: else = 0.05877
else:
br label %exit
; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
; FIXME: It may be a bug that we don't sum back to 1.0.
; CHECK: exit = 0.99993
exit:
%result = phi i32 [ %a, %then ], [ %b, %else ]
ret i32 %result
@@ -50,37 +49,37 @@ exit:
!0 = metadata !{metadata !"branch_weights", i32 64, i32 4}
define i32 @test3(i32 %i, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
; CHECK-LABEL: Printing analysis {{.*}} for function 'test3':
; CHECK-NEXT: block-frequency-info: test3
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
; CHECK: Printing analysis {{.*}} for function 'test3'
; CHECK: entry = 1.0
entry:
switch i32 %i, label %case_a [ i32 1, label %case_b
i32 2, label %case_c
i32 3, label %case_d
i32 4, label %case_e ], !prof !1
; CHECK-NEXT: case_a: float = 0.05,
; CHECK: case_a = 0.04998
case_a:
br label %exit
; CHECK-NEXT: case_b: float = 0.05,
; CHECK: case_b = 0.04998
case_b:
br label %exit
; The 'case_c' branch is predicted more likely via branch weight metadata.
; CHECK-NEXT: case_c: float = 0.8,
; CHECK: case_c = 0.79998
case_c:
br label %exit
; CHECK-NEXT: case_d: float = 0.05,
; CHECK: case_d = 0.04998
case_d:
br label %exit
; CHECK-NEXT: case_e: float = 0.05,
; CHECK: case_e = 0.04998
case_e:
br label %exit
; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
; FIXME: It may be a bug that we don't sum back to 1.0.
; CHECK: exit = 0.99993
exit:
%result = phi i32 [ %a, %case_a ],
[ %b, %case_b ],
@@ -92,50 +91,44 @@ exit:
!1 = metadata !{metadata !"branch_weights", i32 4, i32 4, i32 64, i32 4, i32 4}
; CHECK: Printing analysis {{.*}} for function 'nested_loops'
; CHECK: entry = 1.0
; This test doesn't seem to be assigning sensible frequencies to nested loops.
define void @nested_loops(i32 %a) {
; CHECK-LABEL: Printing analysis {{.*}} for function 'nested_loops':
; CHECK-NEXT: block-frequency-info: nested_loops
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
br label %for.cond1.preheader
; CHECK-NEXT: for.cond1.preheader: float = 4001.0,
for.cond1.preheader:
%x.024 = phi i32 [ 0, %entry ], [ %inc12, %for.inc11 ]
br label %for.cond4.preheader
; CHECK-NEXT: for.cond4.preheader: float = 16008001.0,
for.cond4.preheader:
%y.023 = phi i32 [ 0, %for.cond1.preheader ], [ %inc9, %for.inc8 ]
%add = add i32 %y.023, %x.024
br label %for.body6
; CHECK-NEXT: for.body6: float = 64048012001.0,
for.body6:
%z.022 = phi i32 [ 0, %for.cond4.preheader ], [ %inc, %for.body6 ]
%add7 = add i32 %add, %z.022
tail call void @g(i32 %add7)
tail call void @g(i32 %add7) #2
%inc = add i32 %z.022, 1
%cmp5 = icmp ugt i32 %inc, %a
br i1 %cmp5, label %for.inc8, label %for.body6, !prof !2
; CHECK-NEXT: for.inc8: float = 16008001.0,
for.inc8:
%inc9 = add i32 %y.023, 1
%cmp2 = icmp ugt i32 %inc9, %a
br i1 %cmp2, label %for.inc11, label %for.cond4.preheader, !prof !2
; CHECK-NEXT: for.inc11: float = 4001.0,
for.inc11:
%inc12 = add i32 %x.024, 1
%cmp = icmp ugt i32 %inc12, %a
br i1 %cmp, label %for.end13, label %for.cond1.preheader, !prof !2
; CHECK-NEXT: for.end13: float = 1.0, int = [[ENTRY]]
for.end13:
ret void
}
declare void @g(i32)
declare void @g(i32) #1
!2 = metadata !{metadata !"branch_weights", i32 1, i32 4000}


@@ -1,165 +0,0 @@
; RUN: opt < %s -analyze -block-freq | FileCheck %s
; CHECK-LABEL: Printing analysis {{.*}} for function 'double_exit':
; CHECK-NEXT: block-frequency-info: double_exit
define i32 @double_exit(i32 %N) {
; Mass = 1
; Frequency = 1
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
br label %outer
; Mass = 1
; Backedge mass = 1/3, exit mass = 2/3
; Loop scale = 3/2
; Pseudo-edges = exit
; Pseudo-mass = 1
; Frequency = 1*3/2*1 = 3/2
; CHECK-NEXT: outer: float = 1.5,
outer:
%I.0 = phi i32 [ 0, %entry ], [ %inc6, %outer.inc ]
%Return.0 = phi i32 [ 0, %entry ], [ %Return.1, %outer.inc ]
%cmp = icmp slt i32 %I.0, %N
br i1 %cmp, label %inner, label %exit, !prof !2 ; 2:1
; Mass = 1
; Backedge mass = 3/5, exit mass = 2/5
; Loop scale = 5/2
; Pseudo-edges = outer.inc @ 1/5, exit @ 1/5
; Pseudo-mass = 2/3
; Frequency = 3/2*1*5/2*2/3 = 5/2
; CHECK-NEXT: inner: float = 2.5,
inner:
%Return.1 = phi i32 [ %Return.0, %outer ], [ %call4, %inner.inc ]
%J.0 = phi i32 [ %I.0, %outer ], [ %inc, %inner.inc ]
%cmp2 = icmp slt i32 %J.0, %N
br i1 %cmp2, label %inner.body, label %outer.inc, !prof !1 ; 4:1
; Mass = 4/5
; Frequency = 5/2*4/5 = 2
; CHECK-NEXT: inner.body: float = 2.0,
inner.body:
%call = call i32 @c2(i32 %I.0, i32 %J.0)
%tobool = icmp ne i32 %call, 0
br i1 %tobool, label %exit, label %inner.inc, !prof !0 ; 3:1
; Mass = 3/5
; Frequency = 5/2*3/5 = 3/2
; CHECK-NEXT: inner.inc: float = 1.5,
inner.inc:
%call4 = call i32 @logic2(i32 %Return.1, i32 %I.0, i32 %J.0)
%inc = add nsw i32 %J.0, 1
br label %inner
; Mass = 1/3
; Frequency = 3/2*1/3 = 1/2
; CHECK-NEXT: outer.inc: float = 0.5,
outer.inc:
%inc6 = add nsw i32 %I.0, 1
br label %outer
; Mass = 1
; Frequency = 1
; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
exit:
%Return.2 = phi i32 [ %Return.1, %inner.body ], [ %Return.0, %outer ]
ret i32 %Return.2
}
!0 = metadata !{metadata !"branch_weights", i32 1, i32 3}
!1 = metadata !{metadata !"branch_weights", i32 4, i32 1}
!2 = metadata !{metadata !"branch_weights", i32 2, i32 1}
declare i32 @c2(i32, i32)
declare i32 @logic2(i32, i32, i32)
; CHECK-LABEL: Printing analysis {{.*}} for function 'double_exit_in_loop':
; CHECK-NEXT: block-frequency-info: double_exit_in_loop
define i32 @double_exit_in_loop(i32 %N) {
; Mass = 1
; Frequency = 1
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
br label %outer
; Mass = 1
; Backedge mass = 1/2, exit mass = 1/2
; Loop scale = 2
; Pseudo-edges = exit
; Pseudo-mass = 1
; Frequency = 1*2*1 = 2
; CHECK-NEXT: outer: float = 2.0,
outer:
%I.0 = phi i32 [ 0, %entry ], [ %inc12, %outer.inc ]
%Return.0 = phi i32 [ 0, %entry ], [ %Return.3, %outer.inc ]
%cmp = icmp slt i32 %I.0, %N
br i1 %cmp, label %middle, label %exit, !prof !3 ; 1:1
; Mass = 1
; Backedge mass = 1/3, exit mass = 2/3
; Loop scale = 3/2
; Pseudo-edges = outer.inc
; Pseudo-mass = 1/2
; Frequency = 2*1*3/2*1/2 = 3/2
; CHECK-NEXT: middle: float = 1.5,
middle:
%J.0 = phi i32 [ %I.0, %outer ], [ %inc9, %middle.inc ]
%Return.1 = phi i32 [ %Return.0, %outer ], [ %Return.2, %middle.inc ]
%cmp2 = icmp slt i32 %J.0, %N
br i1 %cmp2, label %inner, label %outer.inc, !prof !2 ; 2:1
; Mass = 1
; Backedge mass = 3/5, exit mass = 2/5
; Loop scale = 5/2
; Pseudo-edges = middle.inc @ 1/5, outer.inc @ 1/5
; Pseudo-mass = 2/3
; Frequency = 3/2*1*5/2*2/3 = 5/2
; CHECK-NEXT: inner: float = 2.5,
inner:
%Return.2 = phi i32 [ %Return.1, %middle ], [ %call7, %inner.inc ]
%K.0 = phi i32 [ %J.0, %middle ], [ %inc, %inner.inc ]
%cmp5 = icmp slt i32 %K.0, %N
br i1 %cmp5, label %inner.body, label %middle.inc, !prof !1 ; 4:1
; Mass = 4/5
; Frequency = 5/2*4/5 = 2
; CHECK-NEXT: inner.body: float = 2.0,
inner.body:
%call = call i32 @c3(i32 %I.0, i32 %J.0, i32 %K.0)
%tobool = icmp ne i32 %call, 0
br i1 %tobool, label %outer.inc, label %inner.inc, !prof !0 ; 3:1
; Mass = 3/5
; Frequency = 5/2*3/5 = 3/2
; CHECK-NEXT: inner.inc: float = 1.5,
inner.inc:
%call7 = call i32 @logic3(i32 %Return.2, i32 %I.0, i32 %J.0, i32 %K.0)
%inc = add nsw i32 %K.0, 1
br label %inner
; Mass = 1/3
; Frequency = 3/2*1/3 = 1/2
; CHECK-NEXT: middle.inc: float = 0.5,
middle.inc:
%inc9 = add nsw i32 %J.0, 1
br label %middle
; Mass = 1/2
; Frequency = 2*1/2 = 1
; CHECK-NEXT: outer.inc: float = 1.0,
outer.inc:
%Return.3 = phi i32 [ %Return.2, %inner.body ], [ %Return.1, %middle ]
%inc12 = add nsw i32 %I.0, 1
br label %outer
; Mass = 1
; Frequency = 1
; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
exit:
ret i32 %Return.0
}
!3 = metadata !{metadata !"branch_weights", i32 1, i32 1}
declare i32 @c3(i32, i32, i32)
declare i32 @logic3(i32, i32, i32, i32)


@@ -1,197 +0,0 @@
; RUN: opt < %s -analyze -block-freq | FileCheck %s
; A loop with multiple exits should be handled correctly.
;
; CHECK-LABEL: Printing analysis {{.*}} for function 'multiexit':
; CHECK-NEXT: block-frequency-info: multiexit
define void @multiexit(i32 %a) {
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
br label %loop.1
; CHECK-NEXT: loop.1: float = 1.333{{3*}},
loop.1:
%i = phi i32 [ 0, %entry ], [ %inc.2, %loop.2 ]
call void @f(i32 %i)
%inc.1 = add i32 %i, 1
%cmp.1 = icmp ugt i32 %inc.1, %a
br i1 %cmp.1, label %exit.1, label %loop.2, !prof !0
; CHECK-NEXT: loop.2: float = 0.666{{6*7}},
loop.2:
call void @g(i32 %inc.1)
%inc.2 = add i32 %inc.1, 1
%cmp.2 = icmp ugt i32 %inc.2, %a
br i1 %cmp.2, label %exit.2, label %loop.1, !prof !1
; CHECK-NEXT: exit.1: float = 0.666{{6*7}},
exit.1:
call void @h(i32 %inc.1)
br label %return
; CHECK-NEXT: exit.2: float = 0.333{{3*}},
exit.2:
call void @i(i32 %inc.2)
br label %return
; CHECK-NEXT: return: float = 1.0, int = [[ENTRY]]
return:
ret void
}
declare void @f(i32 %x)
declare void @g(i32 %x)
declare void @h(i32 %x)
declare void @i(i32 %x)
!0 = metadata !{metadata !"branch_weights", i32 3, i32 3}
!1 = metadata !{metadata !"branch_weights", i32 5, i32 5}
; The current BlockFrequencyInfo algorithm doesn't handle multiple entrances
; into a loop very well. The frequencies assigned to blocks in the loop are
; predictable (and not absurd), but also not correct and therefore not worth
; testing.
;
; There are two testcases below.
;
; For each testcase, I use a CHECK-NEXT/NOT combo like an XFAIL with the
; granularity of a single check. If/when this behaviour is fixed, we'll know
; about it, and the test should be updated.
;
; Testcase #1
; ===========
;
; In this case c1 and c2 should have frequencies of 15/7 and 13/7,
; respectively. To calculate this, consider assigning 1.0 to entry, and
; distributing frequency iteratively (to infinity). At the first iteration,
; entry gives 3/4 to c1 and 1/4 to c2. At every step after, c1 and c2 give 3/4
; of what they have to each other. Somehow, all of it comes out to exit.
;
; c1 = 3/4 + 1/4*3/4 + 3/4*3^2/4^2 + 1/4*3^3/4^3 + 3/4*3^3/4^3 + ...
; c2 = 1/4 + 3/4*3/4 + 1/4*3^2/4^2 + 3/4*3^3/4^3 + 1/4*3^3/4^3 + ...
;
; Simplify by splitting up the odd and even terms of the series and taking out
; factors so that the infinite series matches:
;
; c1 = 3/4 *(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
; + 3/16*(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
; c2 = 1/4 *(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
; + 9/16*(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
;
; c1 = 15/16*(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
; c2 = 13/16*(9^0/16^0 + 9^1/16^1 + 9^2/16^2 + ...)
;
; Since this geometric series sums to 16/7:
;
; c1 = 15/7
; c2 = 13/7
;
; If we treat c1 and c2 as members of the same loop, the exit frequency of the
; loop as a whole is 1/4, so the loop scale should be 4. Summing c1 and c2
; gives 28/7, or 4.0, which is nice confirmation of the math above. (The
; standalone sketch after this function checks these sums numerically.)
;
; However, assuming c1 precedes c2 in reverse post-order, the current algorithm
; returns 3/4 and 13/16, respectively. LoopInfo ignores edges between loops
; (and doesn't see any loops here at all), and -block-freq ignores the
; irreducible edge from c2 to c1.
;
; CHECK-LABEL: Printing analysis {{.*}} for function 'multientry':
; CHECK-NEXT: block-frequency-info: multientry
define void @multientry(i32 %a) {
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
%choose = call i32 @choose(i32 %a)
%compare = icmp ugt i32 %choose, %a
br i1 %compare, label %c1, label %c2, !prof !2
; This is like a single-line XFAIL (see above).
; CHECK-NEXT: c1:
; CHECK-NOT: float = 2.142857{{[0-9]*}},
c1:
%i1 = phi i32 [ %a, %entry ], [ %i2.inc, %c2 ]
%i1.inc = add i32 %i1, 1
%choose1 = call i32 @choose(i32 %i1)
%compare1 = icmp ugt i32 %choose1, %a
br i1 %compare1, label %c2, label %exit, !prof !2
; This is like a single-line XFAIL (see above).
; CHECK-NEXT: c2:
; CHECK-NOT: float = 1.857142{{[0-9]*}},
c2:
%i2 = phi i32 [ %a, %entry ], [ %i1.inc, %c1 ]
%i2.inc = add i32 %i2, 1
%choose2 = call i32 @choose(i32 %i2)
%compare2 = icmp ugt i32 %choose2, %a
br i1 %compare2, label %c1, label %exit, !prof !2
; We still shouldn't lose any frequency.
; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
exit:
ret void
}
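
To double-check the Testcase #1 arithmetic, here is a small standalone C++ sketch (not part of the test) that iterates the distribution numerically: entry hands 3/4 to c1 and 1/4 to c2, and from then on c1 and c2 each pass 3/4 of what they just received to the other. The accumulated totals converge to 15/7 (about 2.142857) and 13/7 (about 1.857143), matching the series summed above:

#include <cstdio>

int main() {
  double InC1 = 0.75, InC2 = 0.25; // mass arriving in this round
  double C1 = 0, C2 = 0;           // accumulated block frequencies
  for (int I = 0; I < 200; ++I) {
    C1 += InC1;
    C2 += InC2;
    // Each block passes 3/4 of what it just received to the other block.
    double NextC1 = 0.75 * InC2;
    double NextC2 = 0.75 * InC1;
    InC1 = NextC1;
    InC2 = NextC2;
  }
  std::printf("c1 = %.6f (15/7 = %.6f)\n", C1, 15.0 / 7);
  std::printf("c2 = %.6f (13/7 = %.6f)\n", C2, 13.0 / 7);
  return 0;
}
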
; Testcase #2
; ===========
;
; In this case c1 and c2 should be treated as equals in a single loop. The
; exit frequency is 1/3, so the scaling factor for the loop should be 3.0. The
; loop is entered 2/3 of the time, and c1 and c2 split the total loop frequency
; evenly (1/2), so they should each have frequencies of 1.0 (3.0*2/3*1/2).
; Another way of computing this result is by assigning 1.0 to entry and showing
; that c1 and c2 should accumulate frequencies of:
;
; 1/3 + 2/9 + 4/27 + 8/81 + ...
; 2^0/3^1 + 2^1/3^2 + 2^2/3^3 + 2^3/3^4 + ...
;
; At the first step, c1 and c2 each get 1/3 of the entry. At each subsequent
; step, c1 and c2 each get 1/3 of what's left in c1 and c2 combined. This
; infinite series sums to 1.
;
; However, assuming c1 precedes c2 in reverse post-order, the current algorithm
; returns 1/2 and 3/4, respectively. LoopInfo ignores edges between loops (and
; treats c1 and c2 as self-loops only), and -block-freq ignores the irreducible
; edge from c2 to c1.
;
; Below I use a CHECK-NEXT/NOT combo like an XFAIL with the granularity of a
; single check. If/when this behaviour is fixed, we'll know about it, and the
; test should be updated.
;
; CHECK-LABEL: Printing analysis {{.*}} for function 'crossloops':
; CHECK-NEXT: block-frequency-info: crossloops
define void @crossloops(i32 %a) {
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
%choose = call i32 @choose(i32 %a)
switch i32 %choose, label %exit [ i32 1, label %c1
i32 2, label %c2 ], !prof !3
; This is like a single-line XFAIL (see above).
; CHECK-NEXT: c1:
; CHECK-NOT: float = 1.0,
c1:
%i1 = phi i32 [ %a, %entry ], [ %i1.inc, %c1 ], [ %i2.inc, %c2 ]
%i1.inc = add i32 %i1, 1
%choose1 = call i32 @choose(i32 %i1)
switch i32 %choose1, label %exit [ i32 1, label %c1
i32 2, label %c2 ], !prof !3
; This is like a single-line XFAIL (see above).
; CHECK-NEXT: c2:
; CHECK-NOT: float = 1.0,
c2:
%i2 = phi i32 [ %a, %entry ], [ %i1.inc, %c1 ], [ %i2.inc, %c2 ]
%i2.inc = add i32 %i2, 1
%choose2 = call i32 @choose(i32 %i2)
switch i32 %choose2, label %exit [ i32 1, label %c1
i32 2, label %c2 ], !prof !3
; We still shouldn't lose any frequency.
; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
exit:
ret void
}
declare i32 @choose(i32)
!2 = metadata !{metadata !"branch_weights", i32 3, i32 1}
!3 = metadata !{metadata !"branch_weights", i32 2, i32 2, i32 2}


@@ -1,44 +0,0 @@
; RUN: opt < %s -analyze -block-freq | FileCheck %s
; CHECK-LABEL: Printing analysis {{.*}} for function 'loop_with_branch':
; CHECK-NEXT: block-frequency-info: loop_with_branch
define void @loop_with_branch(i32 %a) {
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
%skip_loop = call i1 @foo0(i32 %a)
br i1 %skip_loop, label %skip, label %header, !prof !0
; CHECK-NEXT: skip: float = 0.25,
skip:
br label %exit
; CHECK-NEXT: header: float = 4.5,
header:
%i = phi i32 [ 0, %entry ], [ %i.next, %back ]
%i.next = add i32 %i, 1
%choose = call i2 @foo1(i32 %i)
switch i2 %choose, label %exit [ i2 0, label %left
i2 1, label %right ], !prof !1
; CHECK-NEXT: left: float = 1.5,
left:
br label %back
; CHECK-NEXT: right: float = 2.25,
right:
br label %back
; CHECK-NEXT: back: float = 3.75,
back:
br label %header
; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
exit:
ret void
}
declare i1 @foo0(i32)
declare i2 @foo1(i32)
!0 = metadata !{metadata !"branch_weights", i32 1, i32 3}
!1 = metadata !{metadata !"branch_weights", i32 1, i32 2, i32 3}


@@ -1,59 +0,0 @@
; RUN: opt < %s -analyze -block-freq | FileCheck %s
; CHECK-LABEL: Printing analysis {{.*}} for function 'nested_loop_with_branches'
; CHECK-NEXT: block-frequency-info: nested_loop_with_branches
define void @nested_loop_with_branches(i32 %a) {
; CHECK-NEXT: entry: float = 1.0, int = [[ENTRY:[0-9]+]]
entry:
%v0 = call i1 @foo0(i32 %a)
br i1 %v0, label %exit, label %outer, !prof !0
; CHECK-NEXT: outer: float = 12.0,
outer:
%i = phi i32 [ 0, %entry ], [ %i.next, %inner.end ], [ %i.next, %no_inner ]
%i.next = add i32 %i, 1
%do_inner = call i1 @foo1(i32 %i)
br i1 %do_inner, label %no_inner, label %inner, !prof !0
; CHECK-NEXT: inner: float = 36.0,
inner:
%j = phi i32 [ 0, %outer ], [ %j.next, %inner.end ]
%side = call i1 @foo3(i32 %j)
br i1 %side, label %left, label %right, !prof !0
; CHECK-NEXT: left: float = 9.0,
left:
%v4 = call i1 @foo4(i32 %j)
br label %inner.end
; CHECK-NEXT: right: float = 27.0,
right:
%v5 = call i1 @foo5(i32 %j)
br label %inner.end
; CHECK-NEXT: inner.end: float = 36.0,
inner.end:
%stay_inner = phi i1 [ %v4, %left ], [ %v5, %right ]
%j.next = add i32 %j, 1
br i1 %stay_inner, label %inner, label %outer, !prof !1
; CHECK-NEXT: no_inner: float = 3.0,
no_inner:
%continue = call i1 @foo6(i32 %i)
br i1 %continue, label %outer, label %exit, !prof !1
; CHECK-NEXT: exit: float = 1.0, int = [[ENTRY]]
exit:
ret void
}
declare i1 @foo0(i32)
declare i1 @foo1(i32)
declare i1 @foo2(i32)
declare i1 @foo3(i32)
declare i1 @foo4(i32)
declare i1 @foo5(i32)
declare i1 @foo6(i32)
!0 = metadata !{metadata !"branch_weights", i32 1, i32 3}
!1 = metadata !{metadata !"branch_weights", i32 3, i32 1}


@@ -287,8 +287,9 @@ define void @Unwind1() {
; CHECKFP: .LBB{{[0-9_]+}}
; CHECKFP-NEXT: ldc r2, 40
; CHECKFP-NEXT: add r2, r10, r2
; CHECKFP-NEXT: add r2, r2, r0
; CHECKFP-NEXT: add r0, r2, r0
; CHECKFP-NEXT: mov r3, r1
; CHECKFP-NEXT: mov r2, r0
; CHECKFP-NEXT: ldw r9, r10[4]
; CHECKFP-NEXT: ldw r8, r10[5]
; CHECKFP-NEXT: ldw r7, r10[6]
@@ -336,8 +337,9 @@ define void @Unwind1() {
; CHECK-NEXT: ldc r2, 36
; CHECK-NEXT: ldaw r3, sp[0]
; CHECK-NEXT: add r2, r3, r2
; CHECK-NEXT: add r2, r2, r0
; CHECK-NEXT: add r0, r2, r0
; CHECK-NEXT: mov r3, r1
; CHECK-NEXT: mov r2, r0
; CHECK-NEXT: ldw r10, sp[2]
; CHECK-NEXT: ldw r9, sp[3]
; CHECK-NEXT: ldw r8, sp[4]