//===- PassManagerBuilder.cpp - Build Standard Pass -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PassManagerBuilder class, which is used to set up a
// "standard" optimization sequence suitable for languages like C and C++.
//
//===----------------------------------------------------------------------===//
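//
// Typical front-end usage, as an illustrative sketch only (TheModule and the
// inliner threshold are placeholders chosen by the client, not mandated here):
//
//   PassManagerBuilder Builder;
//   Builder.OptLevel = 2;
//   Builder.Inliner = createFunctionInliningPass(/*Threshold=*/225);
//
//   FunctionPassManager FPM(TheModule);
//   PassManager MPM;
//   Builder.populateFunctionPassManager(FPM);
//   Builder.populateModulePassManager(MPM);
//
//===----------------------------------------------------------------------===//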

#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm-c/Transforms/PassManagerBuilder.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/IR/Verifier.h"
#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;

static cl::opt<bool>
RunLoopVectorization("vectorize-loops", cl::Hidden,
                     cl::desc("Run the Loop vectorization passes"));

static cl::opt<bool>
RunSLPVectorization("vectorize-slp", cl::Hidden,
                    cl::desc("Run the SLP vectorization passes"));

static cl::opt<bool>
RunBBVectorization("vectorize-slp-aggressive", cl::Hidden,
                   cl::desc("Run the BB vectorization passes"));

static cl::opt<bool>
UseGVNAfterVectorization("use-gvn-after-vectorization",
                         cl::init(false), cl::Hidden,
                         cl::desc("Run GVN instead of Early CSE after vectorization passes"));

static cl::opt<bool> UseNewSROA("use-new-sroa",
                                cl::init(true), cl::Hidden,
                                cl::desc("Enable the new, experimental SROA pass"));

static cl::opt<bool>
RunLoopRerolling("reroll-loops", cl::Hidden,
                 cl::desc("Run the loop rerolling pass"));

PassManagerBuilder::PassManagerBuilder() {
  OptLevel = 2;
  SizeLevel = 0;
  LibraryInfo = nullptr;
  Inliner = nullptr;
  DisableTailCalls = false;
  DisableUnitAtATime = false;
  DisableUnrollLoops = false;
  BBVectorize = RunBBVectorization;
  SLPVectorize = RunSLPVectorization;
  LoopVectorize = RunLoopVectorization;
  RerollLoops = RunLoopRerolling;
}

PassManagerBuilder::~PassManagerBuilder() {
  delete LibraryInfo;
  delete Inliner;
}

/// Set of global extensions, automatically added as part of the standard set.
static ManagedStatic<SmallVector<std::pair<PassManagerBuilder::ExtensionPointTy,
   PassManagerBuilder::ExtensionFn>, 8> > GlobalExtensions;

void PassManagerBuilder::addGlobalExtension(
    PassManagerBuilder::ExtensionPointTy Ty,
    PassManagerBuilder::ExtensionFn Fn) {
  GlobalExtensions->push_back(std::make_pair(Ty, Fn));
}

void PassManagerBuilder::addExtension(ExtensionPointTy Ty, ExtensionFn Fn) {
  Extensions.push_back(std::make_pair(Ty, Fn));
}
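
// Example of the extension mechanism, as an illustrative sketch only (MyPass
// and createMyPass stand for hypothetical client code, not part of LLVM):
//
//   static void addMyPass(const PassManagerBuilder &Builder,
//                         PassManagerBase &PM) {
//     PM.add(createMyPass());
//   }
//   ...
//   Builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd, addMyPass);
//
// The callback is invoked when the builder reaches the chosen extension point,
// so the extra pass is injected into every pipeline this builder populates.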

void PassManagerBuilder::addExtensionsToPM(ExtensionPointTy ETy,
                                           PassManagerBase &PM) const {
  for (unsigned i = 0, e = GlobalExtensions->size(); i != e; ++i)
    if ((*GlobalExtensions)[i].first == ETy)
      (*GlobalExtensions)[i].second(*this, PM);
  for (unsigned i = 0, e = Extensions.size(); i != e; ++i)
    if (Extensions[i].first == ETy)
      Extensions[i].second(*this, PM);
}

void
PassManagerBuilder::addInitialAliasAnalysisPasses(PassManagerBase &PM) const {
  // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
  // BasicAliasAnalysis wins if they disagree. This is intended to help
  // support "obvious" type-punning idioms.
  PM.add(createTypeBasedAliasAnalysisPass());
  PM.add(createBasicAliasAnalysisPass());
}

void PassManagerBuilder::populateFunctionPassManager(FunctionPassManager &FPM) {
  addExtensionsToPM(EP_EarlyAsPossible, FPM);

  // Add LibraryInfo if we have some.
  if (LibraryInfo) FPM.add(new TargetLibraryInfo(*LibraryInfo));

  if (OptLevel == 0) return;

  addInitialAliasAnalysisPasses(FPM);

  FPM.add(createCFGSimplificationPass());
  if (UseNewSROA)
    FPM.add(createSROAPass());
  else
    FPM.add(createScalarReplAggregatesPass());
  FPM.add(createEarlyCSEPass());
  FPM.add(createLowerExpectIntrinsicPass());
}
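
// A minimal sketch of driving the per-function pipeline built above; Builder
// and M stand for the PassManagerBuilder and Module already set up by the
// caller, and any error handling is omitted:
//
//   FunctionPassManager FPM(&M);
//   Builder.populateFunctionPassManager(FPM);
//   FPM.doInitialization();
//   for (Function &F : M)
//     FPM.run(F);
//   FPM.doFinalization();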

void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
  // If all optimizations are disabled, just run the always-inline pass.
  if (OptLevel == 0) {
    if (Inliner) {
      MPM.add(Inliner);
      Inliner = nullptr;
    }

    // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
    // pass manager, but we don't want to add extensions into that pass manager.
    // To prevent this we must insert a no-op module pass to reset the pass
    // manager to get the same behavior as EP_OptimizerLast in non-O0 builds.
    if (!GlobalExtensions->empty() || !Extensions.empty())
      MPM.add(createBarrierNoopPass());

    addExtensionsToPM(EP_EnabledOnOptLevel0, MPM);
    return;
  }

  // Add LibraryInfo if we have some.
  if (LibraryInfo) MPM.add(new TargetLibraryInfo(*LibraryInfo));

  addInitialAliasAnalysisPasses(MPM);

  if (!DisableUnitAtATime) {
    addExtensionsToPM(EP_ModuleOptimizerEarly, MPM);

    MPM.add(createGlobalOptimizerPass());     // Optimize out global vars

    MPM.add(createIPSCCPPass());              // IP SCCP
    MPM.add(createDeadArgEliminationPass());  // Dead argument elimination

    MPM.add(createInstructionCombiningPass());// Clean up after IPCP & DAE
    addExtensionsToPM(EP_Peephole, MPM);
    MPM.add(createCFGSimplificationPass());   // Clean up after IPCP & DAE
  }

  // Start of CallGraph SCC passes.
  if (!DisableUnitAtATime)
    MPM.add(createPruneEHPass());             // Remove dead EH info
  if (Inliner) {
    MPM.add(Inliner);
    Inliner = nullptr;
  }
  if (!DisableUnitAtATime)
    MPM.add(createFunctionAttrsPass());       // Set readonly/readnone attrs
  if (OptLevel > 2)
    MPM.add(createArgumentPromotionPass());   // Scalarize uninlined fn args

  // Start of function pass.
  // Break up aggregate allocas, using SSAUpdater.
  if (UseNewSROA)
    MPM.add(createSROAPass(/*RequiresDomTree*/ false));
  else
    MPM.add(createScalarReplAggregatesPass(-1, false));
  MPM.add(createEarlyCSEPass());              // Catch trivial redundancies
  MPM.add(createJumpThreadingPass());         // Thread jumps.
  MPM.add(createCorrelatedValuePropagationPass()); // Propagate conditionals
  MPM.add(createCFGSimplificationPass());     // Merge & remove BBs
  MPM.add(createInstructionCombiningPass());  // Combine silly seq's
  addExtensionsToPM(EP_Peephole, MPM);

  if (!DisableTailCalls)
    MPM.add(createTailCallEliminationPass()); // Eliminate tail calls
  MPM.add(createCFGSimplificationPass());     // Merge & remove BBs
  MPM.add(createReassociatePass());           // Reassociate expressions
  MPM.add(createLoopRotatePass());            // Rotate Loop
  MPM.add(createLICMPass());                  // Hoist loop invariants
  MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3));
  MPM.add(createInstructionCombiningPass());
  MPM.add(createIndVarSimplifyPass());        // Canonicalize indvars
  MPM.add(createLoopIdiomPass());             // Recognize idioms like memset.
  MPM.add(createLoopDeletionPass());          // Delete dead loops

  if (!DisableUnrollLoops)
    MPM.add(createSimpleLoopUnrollPass());    // Unroll small loops
  addExtensionsToPM(EP_LoopOptimizerEnd, MPM);

  if (OptLevel > 1)
    MPM.add(createGVNPass());                 // Remove redundancies
  MPM.add(createMemCpyOptPass());             // Remove memcpy / form memset
  MPM.add(createSCCPPass());                  // Constant prop with SCCP

  // Run instcombine after redundancy elimination to exploit opportunities
  // opened up by them.
  MPM.add(createInstructionCombiningPass());
  addExtensionsToPM(EP_Peephole, MPM);
  MPM.add(createJumpThreadingPass());         // Thread jumps
  MPM.add(createCorrelatedValuePropagationPass());
  MPM.add(createDeadStoreEliminationPass());  // Delete dead stores

  addExtensionsToPM(EP_ScalarOptimizerLate, MPM);

  if (RerollLoops)
    MPM.add(createLoopRerollPass());
  if (SLPVectorize)
    MPM.add(createSLPVectorizerPass());       // Vectorize parallel scalar chains.

  if (BBVectorize) {
    MPM.add(createBBVectorizePass());
    MPM.add(createInstructionCombiningPass());
    addExtensionsToPM(EP_Peephole, MPM);
    if (OptLevel > 1 && UseGVNAfterVectorization)
      MPM.add(createGVNPass());               // Remove redundancies
    else
      MPM.add(createEarlyCSEPass());          // Catch trivial redundancies

    // BBVectorize may have significantly shortened a loop body; unroll again.
    if (!DisableUnrollLoops)
      MPM.add(createLoopUnrollPass());
  }

  MPM.add(createAggressiveDCEPass());         // Delete dead instructions
  MPM.add(createCFGSimplificationPass());     // Merge & remove BBs
  MPM.add(createInstructionCombiningPass());  // Clean up after everything.
  addExtensionsToPM(EP_Peephole, MPM);

  // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
  // pass manager that we are specifically trying to avoid. To prevent this
  // we must insert a no-op module pass to reset the pass manager.
  MPM.add(createBarrierNoopPass());
  MPM.add(createLoopVectorizePass(DisableUnrollLoops, LoopVectorize));
  // FIXME: Because of #pragma vectorize enable, the passes below are always
  // inserted in the pipeline, even when the vectorizer doesn't run (ex. when
  // on -O1 and no #pragma is found). Would be good to have these two passes
  // as function calls, so that we can only pass them when the vectorizer
  // changed the code.
  MPM.add(createInstructionCombiningPass());
  addExtensionsToPM(EP_Peephole, MPM);
  MPM.add(createCFGSimplificationPass());

  if (!DisableUnrollLoops)
    MPM.add(createLoopUnrollPass());          // Unroll small loops

  if (!DisableUnitAtATime) {
    // FIXME: We shouldn't bother with this anymore.
    MPM.add(createStripDeadPrototypesPass()); // Get rid of dead prototypes

    // GlobalOpt already deletes dead functions and globals, at -O2 try a
    // late pass of GlobalDCE. It is capable of deleting dead cycles.
    if (OptLevel > 1) {
      MPM.add(createGlobalDCEPass());         // Remove dead fns and globals.
      MPM.add(createConstantMergePass());     // Merge dup global constants
    }
  }
  addExtensionsToPM(EP_OptimizerLast, MPM);
}

void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
                                                bool Internalize,
                                                bool RunInliner,
                                                bool DisableGVNLoadPRE) {
  // Provide AliasAnalysis services for optimizations.
  addInitialAliasAnalysisPasses(PM);

  // Now that composite has been compiled, scan through the module, looking
  // for a main function. If main is defined, mark all other functions
  // internal.
  if (Internalize)
    PM.add(createInternalizePass("main"));

  // Propagate constants at call sites into the functions they call. This
  // opens opportunities for globalopt (and inlining) by substituting function
  // pointers passed as arguments to direct uses of functions.
  PM.add(createIPSCCPPass());

  // Now that we internalized some globals, see if we can hack on them!
  PM.add(createGlobalOptimizerPass());

  // Linking modules together can lead to duplicated global constants, only
  // keep one copy of each constant.
  PM.add(createConstantMergePass());

  // Remove unused arguments from functions.
  PM.add(createDeadArgEliminationPass());

  // Reduce the code after globalopt and ipsccp. Both can open up significant
  // simplification opportunities, and both can propagate functions through
  // function pointers. When this happens, we often have to resolve varargs
  // calls, etc, so let instcombine do this.
  PM.add(createInstructionCombiningPass());
  addExtensionsToPM(EP_Peephole, PM);

  // Inline small functions
  if (RunInliner)
    PM.add(createFunctionInliningPass());

  PM.add(createPruneEHPass());   // Remove dead EH info.

  // Optimize globals again if we ran the inliner.
  if (RunInliner)
    PM.add(createGlobalOptimizerPass());
  PM.add(createGlobalDCEPass()); // Remove dead functions.

  // If we didn't decide to inline a function, check to see if we can
  // transform it to pass arguments by value instead of by reference.
  PM.add(createArgumentPromotionPass());

  // The IPO passes may leave cruft around. Clean up after them.
  PM.add(createInstructionCombiningPass());
  addExtensionsToPM(EP_Peephole, PM);
  PM.add(createJumpThreadingPass());

  // Break up allocas
  if (UseNewSROA)
    PM.add(createSROAPass());
  else
    PM.add(createScalarReplAggregatesPass());

  // Run a few AA driven optimizations here and now, to cleanup the code.
  PM.add(createFunctionAttrsPass());        // Add nocapture.
  PM.add(createGlobalsModRefPass());        // IP alias analysis.

  PM.add(createLICMPass());                 // Hoist loop invariants.
  PM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
  PM.add(createMemCpyOptPass());            // Remove dead memcpys.

  // Nuke dead stores.
  PM.add(createDeadStoreEliminationPass());

  // More loops are countable; try to optimize them.
  PM.add(createIndVarSimplifyPass());
  PM.add(createLoopDeletionPass());
  PM.add(createLoopVectorizePass(true, true));

  // More scalar chains could be vectorized due to more alias information
  PM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.

  // Cleanup and simplify the code after the scalar optimizations.
  PM.add(createInstructionCombiningPass());
  addExtensionsToPM(EP_Peephole, PM);

  PM.add(createJumpThreadingPass());

  // Delete basic blocks, which optimization passes may have killed.
  PM.add(createCFGSimplificationPass());

  // Now that we have optimized the program, discard unreachable functions.
  PM.add(createGlobalDCEPass());
}
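
// Illustrative LTO usage, as a sketch only; the link step that produced the
// merged module M happens elsewhere and is the caller's responsibility:
//
//   PassManager PM;
//   Builder.populateLTOPassManager(PM, /*Internalize=*/true,
//                                  /*RunInliner=*/true);
//   PM.run(M);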

inline PassManagerBuilder *unwrap(LLVMPassManagerBuilderRef P) {
  return reinterpret_cast<PassManagerBuilder*>(P);
}

inline LLVMPassManagerBuilderRef wrap(PassManagerBuilder *P) {
  return reinterpret_cast<LLVMPassManagerBuilderRef>(P);
}

LLVMPassManagerBuilderRef LLVMPassManagerBuilderCreate() {
  PassManagerBuilder *PMB = new PassManagerBuilder();
  return wrap(PMB);
}

void LLVMPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB) {
  PassManagerBuilder *Builder = unwrap(PMB);
  delete Builder;
}

void
LLVMPassManagerBuilderSetOptLevel(LLVMPassManagerBuilderRef PMB,
                                  unsigned OptLevel) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->OptLevel = OptLevel;
}

void
LLVMPassManagerBuilderSetSizeLevel(LLVMPassManagerBuilderRef PMB,
                                   unsigned SizeLevel) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->SizeLevel = SizeLevel;
}

void
LLVMPassManagerBuilderSetDisableUnitAtATime(LLVMPassManagerBuilderRef PMB,
                                            LLVMBool Value) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->DisableUnitAtATime = Value;
}

void
LLVMPassManagerBuilderSetDisableUnrollLoops(LLVMPassManagerBuilderRef PMB,
                                            LLVMBool Value) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->DisableUnrollLoops = Value;
}

void
LLVMPassManagerBuilderSetDisableSimplifyLibCalls(LLVMPassManagerBuilderRef PMB,
                                                 LLVMBool Value) {
  // NOTE: The simplify-libcalls pass has been removed.
}

void
LLVMPassManagerBuilderUseInlinerWithThreshold(LLVMPassManagerBuilderRef PMB,
                                              unsigned Threshold) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->Inliner = createFunctionInliningPass(Threshold);
}

void
LLVMPassManagerBuilderPopulateFunctionPassManager(LLVMPassManagerBuilderRef PMB,
                                                  LLVMPassManagerRef PM) {
  PassManagerBuilder *Builder = unwrap(PMB);
  FunctionPassManager *FPM = unwrap<FunctionPassManager>(PM);
  Builder->populateFunctionPassManager(*FPM);
}

void
LLVMPassManagerBuilderPopulateModulePassManager(LLVMPassManagerBuilderRef PMB,
                                                LLVMPassManagerRef PM) {
  PassManagerBuilder *Builder = unwrap(PMB);
  PassManagerBase *MPM = unwrap(PM);
  Builder->populateModulePassManager(*MPM);
}

void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB,
                                                  LLVMPassManagerRef PM,
                                                  LLVMBool Internalize,
                                                  LLVMBool RunInliner) {
  PassManagerBuilder *Builder = unwrap(PMB);
  PassManagerBase *LPM = unwrap(PM);
  Builder->populateLTOPassManager(*LPM, Internalize != 0, RunInliner != 0);
}
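
// Illustrative use of the C bindings above, as a sketch only; creation of the
// LLVMPassManagerRef PM and all error handling are omitted, and the threshold
// of 225 is merely an example value:
//
//   LLVMPassManagerBuilderRef PMB = LLVMPassManagerBuilderCreate();
//   LLVMPassManagerBuilderSetOptLevel(PMB, 2);
//   LLVMPassManagerBuilderUseInlinerWithThreshold(PMB, 225);
//   LLVMPassManagerBuilderPopulateModulePassManager(PMB, PM);
//   LLVMPassManagerBuilderDispose(PMB);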