//===- SimplifyCFGPass.cpp - CFG Simplification Pass ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements dead code elimination and basic block merging, along
// with a collection of other peephole control flow optimizations. For example:
//
//   * Removes basic blocks with no predecessors.
//   * Merges a basic block into its predecessor if there is only one
//     predecessor and the predecessor only has one successor.
//   * Eliminates PHI nodes for basic blocks with a single predecessor.
//   * Eliminates a basic block that only contains an unconditional branch.
//   * Changes invoke instructions to nounwind functions to be calls.
//   * Changes things like "if (x) if (y)" into "if (x&y)".
//   * etc.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "simplifycfg"
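// When folding conditional branches that share a common destination,
// SimplifyCFG may need to speculate a few extra ("bonus") instructions to
// compute the second branch's condition. This option bounds how many bonus
// instructions are permitted; raising it trades extra arithmetic for
// straighter control flow.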
static cl::opt<unsigned>
UserBonusInstThreshold("bonus-inst-threshold", cl::Hidden, cl::init(1),
   cl::desc("Control the number of bonus instructions (default = 1)"));

STATISTIC(NumSimpl, "Number of blocks simplified");

namespace {
|
2013-07-27 00:01:07 +00:00
|
|
|
struct CFGSimplifyPass : public FunctionPass {
|
|
|
|
static char ID; // Pass identification, replacement for typeid
|
[SimplifyCFG] threshold for folding branches with common destination
Summary:
This patch adds a threshold that controls the number of bonus instructions
allowed for folding branches with common destination. The original code allows
at most one bonus instruction. With this patch, users can customize the
threshold to allow multiple bonus instructions. The default threshold is still
1, so that the code behaves the same as before when users do not specify this
threshold.
The motivation of this change is that tuning this threshold significantly (up
to 25%) improves the performance of some CUDA programs in our internal code
base. In general, branch instructions are very expensive for GPU programs.
Therefore, it is sometimes worth trading more arithmetic computation for a more
straightened control flow. Here's a reduced example:
__global__ void foo(int a, int b, int c, int d, int e, int n,
const int *input, int *output) {
int sum = 0;
for (int i = 0; i < n; ++i)
sum += (((i ^ a) > b) && (((i | c ) ^ d) > e)) ? 0 : input[i];
*output = sum;
}
The select statement in the loop body translates to two branch instructions "if
((i ^ a) > b)" and "if (((i | c) ^ d) > e)" which share a common destination.
With the default threshold, SimplifyCFG is unable to fold them, because
computing the condition of the second branch "(i | c) ^ d > e" requires two
bonus instructions. With the threshold increased, SimplifyCFG can fold the two
branches so that the loop body contains only one branch, making the code
conceptually look like:
sum += (((i ^ a) > b) & (((i | c ) ^ d) > e)) ? 0 : input[i];
Increasing the threshold significantly improves the performance of this
particular example. In the configuration where both conditions are guaranteed
to be true, increasing the threshold from 1 to 2 improves the performance by
18.24%. Even in the configuration where the first condition is false and the
second condition is true, which favors shortcuts, increasing the threshold from
1 to 2 still improves the performance by 4.35%.
We are still looking for a good threshold and maybe a better cost model than
just counting the number of bonus instructions. However, according to the above
numbers, we think it is at least worth adding a threshold to enable more
experiments and tuning. Let me know what you think. Thanks!
Test Plan: Added one test case to check the threshold is in effect
Reviewers: nadav, eliben, meheff, resistor, hfinkel
Reviewed By: hfinkel
Subscribers: hfinkel, llvm-commits
Differential Revision: http://reviews.llvm.org/D5529
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218711 91177308-0d34-0410-b5e6-96231b3b80d8
2014-09-30 22:23:38 +00:00
|
|
|
unsigned BonusInstThreshold;
|
|
|
|
CFGSimplifyPass(int T = -1) : FunctionPass(ID) {
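    // T == -1 (the default) means no explicit threshold was requested; fall
    // back to the -bonus-inst-threshold command-line value.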
    BonusInstThreshold = (T == -1) ? UserBonusInstThreshold : unsigned(T);
    initializeCFGSimplifyPassPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }
};
}

char CFGSimplifyPass::ID = 0;
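// Register the pass and its analysis dependencies with the PassRegistry so
// that the legacy pass manager (and opt's -simplifycfg) can construct it.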
INITIALIZE_PASS_BEGIN(CFGSimplifyPass, "simplifycfg", "Simplify the CFG", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_END(CFGSimplifyPass, "simplifycfg", "Simplify the CFG", false,
                    false)

// Public interface to the CFGSimplification pass
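//
// Illustrative use from a hypothetical client of the legacy pass manager,
// requesting a threshold of two bonus instructions:
//
//   legacy::PassManager PM;
//   PM.add(createCFGSimplificationPass(/*Threshold=*/2));
//
// Passing -1 defers to the -bonus-inst-threshold option.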
|
[SimplifyCFG] threshold for folding branches with common destination
Summary:
This patch adds a threshold that controls the number of bonus instructions
allowed for folding branches with common destination. The original code allows
at most one bonus instruction. With this patch, users can customize the
threshold to allow multiple bonus instructions. The default threshold is still
1, so that the code behaves the same as before when users do not specify this
threshold.
The motivation of this change is that tuning this threshold significantly (up
to 25%) improves the performance of some CUDA programs in our internal code
base. In general, branch instructions are very expensive for GPU programs.
Therefore, it is sometimes worth trading more arithmetic computation for a more
straightened control flow. Here's a reduced example:
__global__ void foo(int a, int b, int c, int d, int e, int n,
const int *input, int *output) {
int sum = 0;
for (int i = 0; i < n; ++i)
sum += (((i ^ a) > b) && (((i | c ) ^ d) > e)) ? 0 : input[i];
*output = sum;
}
The select statement in the loop body translates to two branch instructions "if
((i ^ a) > b)" and "if (((i | c) ^ d) > e)" which share a common destination.
With the default threshold, SimplifyCFG is unable to fold them, because
computing the condition of the second branch "(i | c) ^ d > e" requires two
bonus instructions. With the threshold increased, SimplifyCFG can fold the two
branches so that the loop body contains only one branch, making the code
conceptually look like:
sum += (((i ^ a) > b) & (((i | c ) ^ d) > e)) ? 0 : input[i];
Increasing the threshold significantly improves the performance of this
particular example. In the configuration where both conditions are guaranteed
to be true, increasing the threshold from 1 to 2 improves the performance by
18.24%. Even in the configuration where the first condition is false and the
second condition is true, which favors shortcuts, increasing the threshold from
1 to 2 still improves the performance by 4.35%.
We are still looking for a good threshold and maybe a better cost model than
just counting the number of bonus instructions. However, according to the above
numbers, we think it is at least worth adding a threshold to enable more
experiments and tuning. Let me know what you think. Thanks!
Test Plan: Added one test case to check the threshold is in effect
Reviewers: nadav, eliben, meheff, resistor, hfinkel
Reviewed By: hfinkel
Subscribers: hfinkel, llvm-commits
Differential Revision: http://reviews.llvm.org/D5529
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218711 91177308-0d34-0410-b5e6-96231b3b80d8
2014-09-30 22:23:38 +00:00
|
|
|
FunctionPass *llvm::createCFGSimplificationPass(int Threshold) {
|
|
|
|
return new CFGSimplifyPass(Threshold);
|
2002-05-21 20:49:37 +00:00
|
|
|
}
|
|
|
|
|
2012-09-06 00:59:08 +00:00
|
|
|
/// mergeEmptyReturnBlocks - If we have more than one empty (other than phi
|
2009-12-22 06:07:30 +00:00
|
|
|
/// node) return blocks, merge them together to promote recursive block merging.
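///
/// For example (illustrative), given two return blocks "ret i32 %a" and
/// "ret i32 %b", the second is rewritten to branch to the first, which then
/// returns a phi of the two values:
///   %merge = phi i32 [ %a, ... ], [ %b, ... ]
///   ret i32 %merge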
|
2012-09-06 00:59:08 +00:00
|
|
|
static bool mergeEmptyReturnBlocks(Function &F) {
|
2009-12-22 06:07:30 +00:00
|
|
|
bool Changed = false;
|
2012-07-24 10:51:42 +00:00
|
|
|
|
2014-04-25 05:29:35 +00:00
|
|
|
BasicBlock *RetBlock = nullptr;
|
2012-07-24 10:51:42 +00:00
|
|
|
|
2009-12-22 06:07:30 +00:00
|
|
|
// Scan all the blocks in the function, looking for empty return blocks.
|
|
|
|
for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; ) {
|
|
|
|
BasicBlock &BB = *BBI++;
|
2012-07-24 10:51:42 +00:00
|
|
|
|
2009-12-22 06:07:30 +00:00
|
|
|
// Only look at return blocks.
|
|
|
|
ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator());
|
2014-04-25 05:29:35 +00:00
|
|
|
if (!Ret) continue;
|
2012-07-24 10:51:42 +00:00
|
|
|
|
2009-12-22 06:07:30 +00:00
|
|
|
// Only look at the block if it is empty or the only other thing in it is a
|
|
|
|
// single PHI node that is the operand to the return.
|
|
|
|
if (Ret != &BB.front()) {
|
|
|
|
// Check for something else in the block.
|
|
|
|
BasicBlock::iterator I = Ret;
|
|
|
|
--I;
|
2010-03-14 10:40:55 +00:00
|
|
|
// Skip over debug info.
|
|
|
|
while (isa<DbgInfoIntrinsic>(I) && I != BB.begin())
|
|
|
|
--I;
|
|
|
|
if (!isa<DbgInfoIntrinsic>(I) &&
|
|
|
|
(!isa<PHINode>(I) || I != BB.begin() ||
|
|
|
|
Ret->getNumOperands() == 0 ||
|
|
|
|
Ret->getOperand(0) != I))
|
2009-12-22 06:07:30 +00:00
|
|
|
continue;
|
|
|
|
}
|
2010-03-14 10:40:55 +00:00
|
|
|
|
2009-12-22 06:07:30 +00:00
|
|
|
// If this is the first returning block, remember it and keep going.
|
2014-04-25 05:29:35 +00:00
|
|
|
if (!RetBlock) {
|
2009-12-22 06:07:30 +00:00
|
|
|
RetBlock = &BB;
|
|
|
|
continue;
|
|
|
|
}
|
2012-07-24 10:51:42 +00:00
|
|
|
|
2009-12-22 06:07:30 +00:00
|
|
|
// Otherwise, we found a duplicate return block. Merge the two.
|
|
|
|
Changed = true;
|
2012-07-24 10:51:42 +00:00
|
|
|
|
2009-12-22 06:07:30 +00:00
|
|
|
// Case when there is no input to the return or when the returned values
|
|
|
|
// agree is trivial. Note that they can't agree if there are phis in the
|
|
|
|
// blocks.
|
|
|
|
if (Ret->getNumOperands() == 0 ||
|
2012-07-24 10:51:42 +00:00
|
|
|
Ret->getOperand(0) ==
|
2009-12-22 06:07:30 +00:00
|
|
|
cast<ReturnInst>(RetBlock->getTerminator())->getOperand(0)) {
|
|
|
|
BB.replaceAllUsesWith(RetBlock);
|
|
|
|
BB.eraseFromParent();
|
|
|
|
continue;
|
|
|
|
}
|
2012-07-24 10:51:42 +00:00
|
|
|
|
2009-12-22 06:07:30 +00:00
|
|
|
// If the canonical return block has no PHI node, create one now.
|
|
|
|
PHINode *RetBlockPHI = dyn_cast<PHINode>(RetBlock->begin());
|
2014-04-25 05:29:35 +00:00
|
|
|
if (!RetBlockPHI) {
|
2010-03-15 19:05:46 +00:00
|
|
|
Value *InVal = cast<ReturnInst>(RetBlock->getTerminator())->getOperand(0);
|
2011-03-30 11:19:20 +00:00
|
|
|
pred_iterator PB = pred_begin(RetBlock), PE = pred_end(RetBlock);
|
2011-03-30 11:28:46 +00:00
|
|
|
RetBlockPHI = PHINode::Create(Ret->getOperand(0)->getType(),
|
|
|
|
std::distance(PB, PE), "merge",
|
2009-12-22 06:07:30 +00:00
|
|
|
&RetBlock->front());
|
2012-07-24 10:51:42 +00:00
|
|
|
|
2011-03-30 11:19:20 +00:00
|
|
|
for (pred_iterator PI = PB; PI != PE; ++PI)
|
2009-12-22 06:07:30 +00:00
|
|
|
RetBlockPHI->addIncoming(InVal, *PI);
|
|
|
|
RetBlock->getTerminator()->setOperand(0, RetBlockPHI);
|
|
|
|
}
|
2012-07-24 10:51:42 +00:00
|
|
|
|
2009-12-22 06:07:30 +00:00
|
|
|
// Turn BB into a block that just unconditionally branches to the return
|
|
|
|
// block. This handles the case when the two return blocks have a common
|
|
|
|
// predecessor but that return different things.
|
|
|
|
RetBlockPHI->addIncoming(Ret->getOperand(0), &BB);
|
|
|
|
BB.getTerminator()->eraseFromParent();
|
|
|
|
BranchInst::Create(RetBlock, &BB);
|
|
|
|
}
|
2012-07-24 10:51:42 +00:00
|
|
|
|
2009-12-22 06:07:30 +00:00
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
2012-09-06 00:59:08 +00:00
|
|
|
/// iterativelySimplifyCFG - Call SimplifyCFG on all the blocks in the function,
|
2007-11-13 07:32:38 +00:00
|
|
|
/// iterating until no more changes are made.
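/// Returns true if the function was changed.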
|
2013-01-07 03:53:25 +00:00
|
|
|
static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI,
|
2015-01-04 12:03:27 +00:00
|
|
|
const DataLayout *DL, AssumptionCache *AC,
|
[SimplifyCFG] threshold for folding branches with common destination
Summary:
This patch adds a threshold that controls the number of bonus instructions
allowed for folding branches with common destination. The original code allows
at most one bonus instruction. With this patch, users can customize the
threshold to allow multiple bonus instructions. The default threshold is still
1, so that the code behaves the same as before when users do not specify this
threshold.
The motivation of this change is that tuning this threshold significantly (up
to 25%) improves the performance of some CUDA programs in our internal code
base. In general, branch instructions are very expensive for GPU programs.
Therefore, it is sometimes worth trading more arithmetic computation for a more
straightened control flow. Here's a reduced example:
__global__ void foo(int a, int b, int c, int d, int e, int n,
const int *input, int *output) {
int sum = 0;
for (int i = 0; i < n; ++i)
sum += (((i ^ a) > b) && (((i | c ) ^ d) > e)) ? 0 : input[i];
*output = sum;
}
The select statement in the loop body translates to two branch instructions "if
((i ^ a) > b)" and "if (((i | c) ^ d) > e)" which share a common destination.
With the default threshold, SimplifyCFG is unable to fold them, because
computing the condition of the second branch "(i | c) ^ d > e" requires two
bonus instructions. With the threshold increased, SimplifyCFG can fold the two
branches so that the loop body contains only one branch, making the code
conceptually look like:
sum += (((i ^ a) > b) & (((i | c ) ^ d) > e)) ? 0 : input[i];
Increasing the threshold significantly improves the performance of this
particular example. In the configuration where both conditions are guaranteed
to be true, increasing the threshold from 1 to 2 improves the performance by
18.24%. Even in the configuration where the first condition is false and the
second condition is true, which favors shortcuts, increasing the threshold from
1 to 2 still improves the performance by 4.35%.
We are still looking for a good threshold and maybe a better cost model than
just counting the number of bonus instructions. However, according to the above
numbers, we think it is at least worth adding a threshold to enable more
experiments and tuning. Let me know what you think. Thanks!
Test Plan: Added one test case to check the threshold is in effect
Reviewers: nadav, eliben, meheff, resistor, hfinkel
Reviewed By: hfinkel
Subscribers: hfinkel, llvm-commits
Differential Revision: http://reviews.llvm.org/D5529
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218711 91177308-0d34-0410-b5e6-96231b3b80d8
2014-09-30 22:23:38 +00:00
|
|
|
unsigned BonusInstThreshold) {
|
2007-11-13 07:32:38 +00:00
|
|
|
bool Changed = false;
|
2002-05-21 20:49:37 +00:00
|
|
|
bool LocalChange = true;
|
|
|
|
while (LocalChange) {
|
|
|
|
LocalChange = false;
|
2012-07-24 10:51:42 +00:00
|
|
|
|
2010-08-14 00:29:42 +00:00
|
|
|
// Loop over all of the basic blocks and remove them if they are unneeded...
|
2002-05-21 20:49:37 +00:00
|
|
|
//
|
2010-08-14 00:29:42 +00:00
|
|
|
for (Function::iterator BBIt = F.begin(); BBIt != F.end(); ) {
|
2015-01-04 12:03:27 +00:00
|
|
|
if (SimplifyCFG(BBIt++, TTI, BonusInstThreshold, DL, AC)) {
|
2002-05-21 20:49:37 +00:00
|
|
|
LocalChange = true;
|
|
|
|
++NumSimpl;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Changed |= LocalChange;
|
|
|
|
}
|
|
|
|
return Changed;
|
|
|
|
}
|
2007-11-13 07:32:38 +00:00
|
|
|
|
|
|
|
// It is possible that we may require multiple passes over the code to fully
|
|
|
|
// simplify the CFG.
|
|
|
|
//
|
|
|
|
bool CFGSimplifyPass::runOnFunction(Function &F) {
|
2014-02-06 00:07:05 +00:00
|
|
|
if (skipOptnoneFunction(F))
|
|
|
|
return false;
|
|
|
|
|
2015-01-04 12:03:27 +00:00
|
|
|
AssumptionCache *AC =
|
|
|
|
&getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
|
[PM] Change the core design of the TTI analysis to use a polymorphic
type erased interface and a single analysis pass rather than an
extremely complex analysis group.
The end result is that the TTI analysis can contain a type erased
implementation that supports the polymorphic TTI interface. We can build
one from a target-specific implementation or from a dummy one in the IR.
I've also factored all of the code into "mix-in"-able base classes,
including CRTP base classes to facilitate calling back up to the most
specialized form when delegating horizontally across the surface. These
aren't as clean as I would like and I'm planning to work on cleaning
some of this up, but I wanted to start by putting into the right form.
There are a number of reasons for this change, and this particular
design. The first and foremost reason is that an analysis group is
complete overkill, and the chaining delegation strategy was so opaque,
confusing, and high overhead that TTI was suffering greatly for it.
Several of the TTI functions had failed to be implemented in all places
because of the chaining-based delegation making there be no checking of
this. A few other functions were implemented with incorrect delegation.
The message to me was very clear working on this -- the delegation and
analysis group structure was too confusing to be useful here.
The other reason of course is that this is *much* more natural fit for
the new pass manager. This will lay the ground work for a type-erased
per-function info object that can look up the correct subtarget and even
cache it.
Yet another benefit is that this will significantly simplify the
interaction of the pass managers and the TargetMachine. See the future
work below.
The downside of this change is that it is very, very verbose. I'm going
to work to improve that, but it is somewhat an implementation necessity
in C++ to do type erasure. =/ I discussed this design really extensively
with Eric and Hal prior to going down this path, and afterward showed
them the result. No one was really thrilled with it, but there doesn't
seem to be a substantially better alternative. Using a base class and
virtual method dispatch would make the code much shorter, but as
discussed in the update to the programmer's manual and elsewhere,
a polymorphic interface feels like the more principled approach even if
this is perhaps the least compelling example of it. ;]
Ultimately, there is still a lot more to be done here, but this was the
huge chunk that I couldn't really split things out of because this was
the interface change to TTI. I've tried to minimize all the other parts
of this. The follow up work should include at least:
1) Improving the TargetMachine interface by having it directly return
a TTI object. Because we have a non-pass object with value semantics
and an internal type erasure mechanism, we can narrow the interface
of the TargetMachine to *just* do what we need: build and return
a TTI object that we can then insert into the pass pipeline.
2) Make the TTI object be fully specialized for a particular function.
This will include splitting off a minimal form of it which is
sufficient for the inliner and the old pass manager.
3) Add a new pass manager analysis which produces TTI objects from the
target machine for each function. This may actually be done as part
of #2 in order to use the new analysis to implement #2.
4) Work on narrowing the API between TTI and the targets so that it is
easier to understand and less verbose to type erase.
5) Work on narrowing the API between TTI and its clients so that it is
easier to understand and less verbose to forward.
6) Try to improve the CRTP-based delegation. I feel like this code is
just a bit messy and exacerbating the complexity of implementing
the TTI in each target.
Many thanks to Eric and Hal for their help here. I ended up blocked on
this somewhat more abruptly than I expected, and so I appreciate getting
it sorted out very quickly.
Differential Revision: http://reviews.llvm.org/D7293
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@227669 91177308-0d34-0410-b5e6-96231b3b80d8
2015-01-31 03:43:40 +00:00
|
|
|
const TargetTransformInfo &TTI =
|
|
|
|
getAnalysis<TargetTransformInfoWrapperPass>().getTTI();
|
2014-02-25 17:30:31 +00:00
|
|
|
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
|
2014-04-25 05:29:35 +00:00
|
|
|
const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
|
2013-08-12 22:38:43 +00:00
|
|
|
bool EverChanged = removeUnreachableBlocks(F);
|
2012-09-06 00:59:08 +00:00
|
|
|
EverChanged |= mergeEmptyReturnBlocks(F);
|
2015-01-04 12:03:27 +00:00
|
|
|
EverChanged |= iterativelySimplifyCFG(F, TTI, DL, AC, BonusInstThreshold);
|
2010-02-05 22:03:18 +00:00
|
|
|
|
2007-11-13 07:32:38 +00:00
|
|
|
// If neither pass changed anything, we're done.
|
|
|
|
if (!EverChanged) return false;
|
|
|
|
|
2012-09-06 00:59:08 +00:00
|
|
|
// iterativelySimplifyCFG can (rarely) make some loops dead. If this happens,
|
2013-08-12 22:38:43 +00:00
|
|
|
// removeUnreachableBlocks is needed to nuke them, which means we should
|
2007-11-13 07:32:38 +00:00
|
|
|
// iterate between the two optimizations. We structure the code like this to
|
2012-09-06 00:59:08 +00:00
|
|
|
// avoid reruning iterativelySimplifyCFG if the second pass of
|
2013-08-12 22:38:43 +00:00
|
|
|
// removeUnreachableBlocks doesn't do anything.
|
|
|
|
if (!removeUnreachableBlocks(F))
|
2007-11-13 07:32:38 +00:00
|
|
|
return true;
|
2010-02-05 22:03:18 +00:00
|
|
|
|
2007-11-13 07:32:38 +00:00
|
|
|
do {
|
2015-01-04 12:03:27 +00:00
|
|
|
EverChanged = iterativelySimplifyCFG(F, TTI, DL, AC, BonusInstThreshold);
|
2013-08-12 22:38:43 +00:00
|
|
|
EverChanged |= removeUnreachableBlocks(F);
|
2007-11-13 07:32:38 +00:00
|
|
|
} while (EverChanged);
|
2010-02-05 22:03:18 +00:00
|
|
|
|
2007-11-13 07:32:38 +00:00
|
|
|
return true;
|
|
|
|
}
|