//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "InstCombine.h"
|
2012-12-03 16:50:05 +00:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2010-05-28 16:19:17 +00:00
|
|
|
#include "llvm/Analysis/Loads.h"
|
2013-01-02 11:36:10 +00:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2014-10-19 10:46:46 +00:00
|
|
|
#include "llvm/IR/LLVMContext.h"
|
2013-01-02 11:36:10 +00:00
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
2010-01-05 05:57:49 +00:00
|
|
|
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
|
|
|
|
#include "llvm/Transforms/Utils/Local.h"
|
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 02:55:47 +00:00
|
|
|
#define DEBUG_TYPE "instcombine"
|
|
|
|
|
2012-08-21 08:39:44 +00:00
|
|
|
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
|
|
|
|
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
|
|
|
|
|
|
|
|
/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
|
|
|
|
/// some part of a constant global variable. This intentionally only accepts
|
|
|
|
/// constant expressions because we can't rewrite arbitrary instructions.
|
|
|
|
static bool pointsToConstantGlobal(Value *V) {
|
|
|
|
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
|
|
|
|
return GV->isConstant();
|
2014-04-24 00:01:09 +00:00
|
|
|
|
|
|
|
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
|
2012-08-21 08:39:44 +00:00
|
|
|
if (CE->getOpcode() == Instruction::BitCast ||
|
2014-04-24 00:01:09 +00:00
|
|
|
CE->getOpcode() == Instruction::AddrSpaceCast ||
|
2012-08-21 08:39:44 +00:00
|
|
|
CE->getOpcode() == Instruction::GetElementPtr)
|
|
|
|
return pointsToConstantGlobal(CE->getOperand(0));
|
2014-04-24 00:01:09 +00:00
|
|
|
}
|
2012-08-21 08:39:44 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
///
/// On success, any memcpy/memmove found is reported through \p TheCopy (at
/// most one copy is accepted), and any lifetime intrinsics encountered are
/// collected in \p ToDelete so the caller can remove them if it performs the
/// replacement.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  // This walk is deliberately iterative (an explicit worklist of
  // <value, is-offset> pairs) rather than recursive: a long chain of GEPs or
  // bitcasts off the alloca would otherwise overflow the stack.
  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.push_back(std::make_pair(V, false));
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      Instruction *I = cast<Instruction>(U.getUser());

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.  Casts don't move the
        // pointer, so IsOffset is propagated unchanged.
        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer.  If
        // it doesn't, it does.
        ValuesToInspect.push_back(
            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
        continue;
      }

      if (CallSite CS = I) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        // Inalloca arguments are clobbered by the call.
        unsigned ArgNo = CS.getArgumentNo(&U);
        if (CS.isInAllocaArgument(ArgNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (CS.isByValArgument(ArgNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}
|
|
|
|
|
|
|
|
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
|
|
|
|
/// modified by a copy from a constant global. If we can prove this, we can
|
|
|
|
/// replace any uses of the alloca with uses of the global directly.
|
|
|
|
static MemTransferInst *
|
|
|
|
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
|
|
|
|
SmallVectorImpl<Instruction *> &ToDelete) {
|
2014-04-25 05:29:35 +00:00
|
|
|
MemTransferInst *TheCopy = nullptr;
|
2012-08-21 08:39:44 +00:00
|
|
|
if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
|
|
|
|
return TheCopy;
|
2014-04-25 05:29:35 +00:00
|
|
|
return nullptr;
|
2012-08-21 08:39:44 +00:00
|
|
|
}
|
|
|
|
|
2010-01-05 05:57:49 +00:00
|
|
|
/// visitAllocaInst - Canonicalize and simplify an alloca: expose array-size
/// casts early, rewrite constant-size array allocas as a single array-typed
/// alloca, merge zero-sized allocas into one entry-block alloca, and replace
/// an alloca whose only initialization is a copy from a constant global with
/// the global itself.
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (DL) {
    Type *IntPtrTy = DL->getIntPtrType(AI.getType());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      // Return the instruction to signal that it changed and should be
      // revisited.
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, nullptr, AI.getName());
      // Preserve the original alignment on the replacement alloca.
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      // Without DataLayout fall back to i64 for the GEP index type.
      Type *IdxTy = DL
                  ? DL->getIntPtrType(AI.getType())
                  : Type::getInt64Ty(AI.getContext());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      // GEP [C x Ty]* with indices (0, 0) yields a pointer to the first
      // element, matching the original alloca's result type.
      Value *Idx[2] = { NullIdx, NullIdx };
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      // An undef array size means the alloca's value is arbitrary; fold it
      // to a null pointer of the same type.
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (DL && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together.  Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
            DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        // If the pointee types differ, bridge them with a bitcast rather
        // than replacing uses directly.
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, AC, &AI, DT);
      // Only fold if the global's known alignment satisfies the alloca's.
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        // The copy and any instructions feeding only it are now dead.
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}
|
|
|
|
|
2014-10-19 10:46:46 +00:00
|
|
|
/// \brief Helper to combine a load to a new type.
|
|
|
|
///
|
|
|
|
/// This just does the work of combining a load to a new type. It handles
|
|
|
|
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
|
|
|
|
/// loaded *value* type. This will convert it to a pointer, cast the operand to
|
|
|
|
/// that pointer type, load it, etc.
|
|
|
|
///
|
|
|
|
/// Note that this will create all of the instructions with whatever insert
|
|
|
|
/// point the \c InstCombiner currently is using.
|
|
|
|
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy) {
|
|
|
|
Value *Ptr = LI.getPointerOperand();
|
|
|
|
unsigned AS = LI.getPointerAddressSpace();
|
2014-11-11 21:30:22 +00:00
|
|
|
SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
|
2014-10-19 10:46:46 +00:00
|
|
|
LI.getAllMetadata(MD);
|
|
|
|
|
|
|
|
LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
|
|
|
|
IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
|
|
|
|
LI.getAlignment(), LI.getName());
|
|
|
|
for (const auto &MDPair : MD) {
|
|
|
|
unsigned ID = MDPair.first;
|
2014-11-11 21:30:22 +00:00
|
|
|
MDNode *N = MDPair.second;
|
2014-10-19 10:46:46 +00:00
|
|
|
// Note, essentially every kind of metadata should be preserved here! This
|
|
|
|
// routine is supposed to clone a load instruction changing *only its type*.
|
|
|
|
// The only metadata it makes sense to drop is metadata which is invalidated
|
|
|
|
// when the pointer type changes. This should essentially never be the case
|
|
|
|
// in LLVM, but we explicitly switch over only known metadata to be
|
|
|
|
// conservatively correct. If you are adding metadata to LLVM which pertains
|
|
|
|
// to loads, you almost certainly want to add it here.
|
|
|
|
switch (ID) {
|
|
|
|
case LLVMContext::MD_dbg:
|
|
|
|
case LLVMContext::MD_tbaa:
|
|
|
|
case LLVMContext::MD_prof:
|
|
|
|
case LLVMContext::MD_fpmath:
|
|
|
|
case LLVMContext::MD_tbaa_struct:
|
|
|
|
case LLVMContext::MD_invariant_load:
|
|
|
|
case LLVMContext::MD_alias_scope:
|
|
|
|
case LLVMContext::MD_noalias:
|
2014-10-21 00:13:20 +00:00
|
|
|
case LLVMContext::MD_nontemporal:
|
|
|
|
case LLVMContext::MD_mem_parallel_loop_access:
|
2014-10-21 21:00:03 +00:00
|
|
|
case LLVMContext::MD_nonnull:
|
2014-10-19 10:46:46 +00:00
|
|
|
// All of these directly apply.
|
|
|
|
NewLoad->setMetadata(ID, N);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LLVMContext::MD_range:
|
|
|
|
// FIXME: It would be nice to propagate this in some way, but the type
|
|
|
|
// conversions make it hard.
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NewLoad;
|
|
|
|
}
|
|
|
|
|
2014-10-18 06:36:22 +00:00
|
|
|
/// \brief Combine loads to match the type of value their uses after looking
|
|
|
|
/// through intervening bitcasts.
|
|
|
|
///
|
|
|
|
/// The core idea here is that if the result of a load is used in an operation,
|
|
|
|
/// we should load the type most conducive to that operation. For example, when
|
|
|
|
/// loading an integer and converting that immediately to a pointer, we should
|
|
|
|
/// instead directly load a pointer.
|
|
|
|
///
|
|
|
|
/// However, this routine must never change the width of a load or the number of
|
|
|
|
/// loads as that would introduce a semantic change. This combine is expected to
|
|
|
|
/// be a semantic no-op which just allows loads to more closely model the types
|
|
|
|
/// of their consuming operations.
|
|
|
|
///
|
|
|
|
/// Currently, we also refuse to change the precise type used for an atomic load
|
|
|
|
/// or a volatile load. This is debatable, and might be reasonable to change
|
|
|
|
/// later. However, it is risky in case some backend or other part of LLVM is
|
|
|
|
/// relying on the exact type loaded to select appropriate atomic operations.
|
|
|
|
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
|
|
|
|
// FIXME: We could probably with some care handle both volatile and atomic
|
|
|
|
// loads here but it isn't clear that this is important.
|
|
|
|
if (!LI.isSimple())
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
if (LI.use_empty())
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
|
|
|
|
// Fold away bit casts of the loaded value by loading the desired type.
|
|
|
|
if (LI.hasOneUse())
|
|
|
|
if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
|
2014-10-19 10:46:46 +00:00
|
|
|
LoadInst *NewLoad = combineLoadToNewType(IC, LI, BC->getDestTy());
|
2014-10-18 06:36:22 +00:00
|
|
|
BC->replaceAllUsesWith(NewLoad);
|
|
|
|
IC.EraseInstFromFunction(*BC);
|
|
|
|
return &LI;
|
2010-01-05 05:57:49 +00:00
|
|
|
}
|
2014-10-18 06:36:22 +00:00
|
|
|
|
Revert r223764 which taught instcombine about integer-based elment extraction
patterns.
This is causing Clang to miscompile itself for 32-bit x86 somehow, and likely
also on ARM and PPC. I really don't know how, but reverting now that I've
confirmed this is actually the culprit. I have a reproduction as well and so
should be able to restore this shortly.
This reverts commit r223764.
Original commit log follows:
Teach instcombine to canonicalize "element extraction" from a load of an
integer and "element insertion" into a store of an integer into actual
element extraction, element insertion, and vector loads and stores.
Previously various parts of LLVM (including instcombine itself) would
introduce integer loads and stores into the code as a way of opaquely
loading and storing "bits". In some cases (such as a memcpy of
std::complex<float> object) we will eventually end up using those bits
in non-integer types. In order for SROA to effectively promote the
allocas involved, it splits these "store a bag of bits" integer loads
and stores up into the constituent parts. However, for non-alloca loads
and tsores which remain, it uses integer math to recombine the values
into a large integer to load or store.
All of this would be "fine", except that it forces LLVM to go through
integer math to combine and split up values. While this makes perfect
sense for integers (and in fact is critical for bitfields to end up
lowering efficiently) it is *terrible* for non-integer types, especially
floating point types. We have a much more canonical way of representing
the act of concatenating the bits of two SSA values in LLVM: a vector
and insertelement. This patch teaching InstCombine to use this
representation.
With this patch applied, LLVM will no longer introduce integer math into
the critical path of every loop over std::complex<float> operations such
as those that make up the hot path of ... oh, most HPC code, Eigen, and
any other heavy linear algebra library.
For the record, I looked *extensively* at fixing this in other parts of
the compiler, but it just doesn't work:
- We really do want to canonicalize memcpy and other bit-motion to
integer loads and stores. SSA values are tremendously more powerful
than "copy" intrinsics. Not doing this regresses massive amounts of
LLVM's scalar optimizer.
- We really do need to split up integer loads and stores of this form in
SROA or every memcpy of a trivially copyable struct will prevent SSA
formation of the members of that struct. It essentially turns off
SROA.
- The closest alternative is to actually split the loads and stores when
partitioning with SROA, but this has all of the downsides historically
discussed of splitting up loads and stores -- the wide-store
information is fundamentally lost. We would also see performance
regressions for bitfield-heavy code and other places where the
integers aren't really intended to be split without seemingly
arbitrary logic to treat integers totally differently.
- We *can* effectively fix this in instcombine, so it isn't that hard of
a choice to make IMO.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@223813 91177308-0d34-0410-b5e6-96231b3b80d8
2014-12-09 19:21:16 +00:00
|
|
|
// FIXME: We should also canonicalize loads of vectors when their elements are
|
|
|
|
// cast to other types.
|
2014-04-25 05:29:35 +00:00
|
|
|
return nullptr;
|
2010-01-05 05:57:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// visitLoadInst - Simplify a load: canonicalize its loaded type, improve its
/// alignment, forward previously stored/loaded values, turn loads through
/// null into unreachable markers, and sink loads into selects of addresses.
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign = getOrEnforceKnownAlignment(
        Op, DL->getPrefTypeAlignment(LI.getType()), DL, AC, &LI, DT);
    unsigned LoadAlign = LI.getAlignment();
    // Alignment 0 means "unspecified"; treat it as the ABI alignment.
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      DL->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return nullptr;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  // The literal 6 caps the backwards scan done by FindAvailableLoadedValue.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
    return ReplaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many others simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}
|
|
|
|
|
2014-11-25 10:09:51 +00:00
|
|
|
/// \brief Combine stores to match the type of value being stored.
|
|
|
|
///
|
|
|
|
/// The core idea here is that the memory does not have any intrinsic type and
|
|
|
|
/// where we can we should match the type of a store to the type of value being
|
|
|
|
/// stored.
|
|
|
|
///
|
|
|
|
/// However, this routine must never change the width of a store or the number of
|
|
|
|
/// stores as that would introduce a semantic change. This combine is expected to
|
|
|
|
/// be a semantic no-op which just allows stores to more closely model the types
|
|
|
|
/// of their incoming values.
|
|
|
|
///
|
|
|
|
/// Currently, we also refuse to change the precise type used for an atomic or
|
|
|
|
/// volatile store. This is debatable, and might be reasonable to change later.
|
|
|
|
/// However, it is risky in case some backend or other part of LLVM is relying
|
|
|
|
/// on the exact type stored to select appropriate atomic operations.
|
|
|
|
///
|
|
|
|
/// \returns true if the store was successfully combined away. This indicates
|
|
|
|
/// the caller must erase the store instruction. We have to let the caller erase
|
|
|
|
/// the store instruction sas otherwise there is no way to signal whether it was
|
|
|
|
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
|
|
|
|
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
|
|
|
|
// FIXME: We could probably with some care handle both volatile and atomic
|
|
|
|
// stores here but it isn't clear that this is important.
|
|
|
|
if (!SI.isSimple())
|
|
|
|
return false;
|
2010-01-05 05:57:49 +00:00
|
|
|
|
Revert r223764 which taught instcombine about integer-based element extraction
patterns.
This is causing Clang to miscompile itself for 32-bit x86 somehow, and likely
also on ARM and PPC. I really don't know how, but reverting now that I've
confirmed this is actually the culprit. I have a reproduction as well and so
should be able to restore this shortly.
This reverts commit r223764.
Original commit log follows:
Teach instcombine to canonicalize "element extraction" from a load of an
integer and "element insertion" into a store of an integer into actual
element extraction, element insertion, and vector loads and stores.
Previously various parts of LLVM (including instcombine itself) would
introduce integer loads and stores into the code as a way of opaquely
loading and storing "bits". In some cases (such as a memcpy of
std::complex<float> object) we will eventually end up using those bits
in non-integer types. In order for SROA to effectively promote the
allocas involved, it splits these "store a bag of bits" integer loads
and stores up into the constituent parts. However, for non-alloca loads
and stores which remain, it uses integer math to recombine the values
into a large integer to load or store.
All of this would be "fine", except that it forces LLVM to go through
integer math to combine and split up values. While this makes perfect
sense for integers (and in fact is critical for bitfields to end up
lowering efficiently) it is *terrible* for non-integer types, especially
floating point types. We have a much more canonical way of representing
the act of concatenating the bits of two SSA values in LLVM: a vector
and insertelement. This patch teaching InstCombine to use this
representation.
With this patch applied, LLVM will no longer introduce integer math into
the critical path of every loop over std::complex<float> operations such
as those that make up the hot path of ... oh, most HPC code, Eigen, and
any other heavy linear algebra library.
For the record, I looked *extensively* at fixing this in other parts of
the compiler, but it just doesn't work:
- We really do want to canonicalize memcpy and other bit-motion to
integer loads and stores. SSA values are tremendously more powerful
than "copy" intrinsics. Not doing this regresses massive amounts of
LLVM's scalar optimizer.
- We really do need to split up integer loads and stores of this form in
SROA or every memcpy of a trivially copyable struct will prevent SSA
formation of the members of that struct. It essentially turns off
SROA.
- The closest alternative is to actually split the loads and stores when
partitioning with SROA, but this has all of the downsides historically
discussed of splitting up loads and stores -- the wide-store
information is fundamentally lost. We would also see performance
regressions for bitfield-heavy code and other places where the
integers aren't really intended to be split without seemingly
arbitrary logic to treat integers totally differently.
- We *can* effectively fix this in instcombine, so it isn't that hard of
a choice to make IMO.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@223813 91177308-0d34-0410-b5e6-96231b3b80d8
2014-12-09 19:21:16 +00:00
|
|
|
Value *Ptr = SI.getPointerOperand();
|
2014-11-25 10:09:51 +00:00
|
|
|
Value *V = SI.getValueOperand();
|
Revert r223764 which taught instcombine about integer-based elment extraction
patterns.
This is causing Clang to miscompile itself for 32-bit x86 somehow, and likely
also on ARM and PPC. I really don't know how, but reverting now that I've
confirmed this is actually the culprit. I have a reproduction as well and so
should be able to restore this shortly.
This reverts commit r223764.
Original commit log follows:
Teach instcombine to canonicalize "element extraction" from a load of an
integer and "element insertion" into a store of an integer into actual
element extraction, element insertion, and vector loads and stores.
Previously various parts of LLVM (including instcombine itself) would
introduce integer loads and stores into the code as a way of opaquely
loading and storing "bits". In some cases (such as a memcpy of
std::complex<float> object) we will eventually end up using those bits
in non-integer types. In order for SROA to effectively promote the
allocas involved, it splits these "store a bag of bits" integer loads
and stores up into the constituent parts. However, for non-alloca loads
and tsores which remain, it uses integer math to recombine the values
into a large integer to load or store.
All of this would be "fine", except that it forces LLVM to go through
integer math to combine and split up values. While this makes perfect
sense for integers (and in fact is critical for bitfields to end up
lowering efficiently) it is *terrible* for non-integer types, especially
floating point types. We have a much more canonical way of representing
the act of concatenating the bits of two SSA values in LLVM: a vector
and insertelement. This patch teaching InstCombine to use this
representation.
With this patch applied, LLVM will no longer introduce integer math into
the critical path of every loop over std::complex<float> operations such
as those that make up the hot path of ... oh, most HPC code, Eigen, and
any other heavy linear algebra library.
For the record, I looked *extensively* at fixing this in other parts of
the compiler, but it just doesn't work:
- We really do want to canonicalize memcpy and other bit-motion to
integer loads and stores. SSA values are tremendously more powerful
than "copy" intrinsics. Not doing this regresses massive amounts of
LLVM's scalar optimizer.
- We really do need to split up integer loads and stores of this form in
SROA or every memcpy of a trivially copyable struct will prevent SSA
formation of the members of that struct. It essentially turns off
SROA.
- The closest alternative is to actually split the loads and stores when
partitioning with SROA, but this has all of the downsides historically
discussed of splitting up loads and stores -- the wide-store
information is fundamentally lost. We would also see performance
regressions for bitfield-heavy code and other places where the
integers aren't really intended to be split without seemingly
arbitrary logic to treat integers totally differently.
- We *can* effectively fix this in instcombine, so it isn't that hard of
a choice to make IMO.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@223813 91177308-0d34-0410-b5e6-96231b3b80d8
2014-12-09 19:21:16 +00:00
|
|
|
unsigned AS = SI.getPointerAddressSpace();
|
|
|
|
SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
|
|
|
|
SI.getAllMetadata(MD);
|
2014-11-25 10:09:51 +00:00
|
|
|
|
|
|
|
// Fold away bit casts of the stored value by storing the original type.
|
|
|
|
if (auto *BC = dyn_cast<BitCastInst>(V)) {
|
Revert r223764 which taught instcombine about integer-based element extraction
patterns.
This is causing Clang to miscompile itself for 32-bit x86 somehow, and likely
also on ARM and PPC. I really don't know how, but reverting now that I've
confirmed this is actually the culprit. I have a reproduction as well and so
should be able to restore this shortly.
This reverts commit r223764.
Original commit log follows:
Teach instcombine to canonicalize "element extraction" from a load of an
integer and "element insertion" into a store of an integer into actual
element extraction, element insertion, and vector loads and stores.
Previously various parts of LLVM (including instcombine itself) would
introduce integer loads and stores into the code as a way of opaquely
loading and storing "bits". In some cases (such as a memcpy of
std::complex<float> object) we will eventually end up using those bits
in non-integer types. In order for SROA to effectively promote the
allocas involved, it splits these "store a bag of bits" integer loads
and stores up into the constituent parts. However, for non-alloca loads
and stores which remain, it uses integer math to recombine the values
into a large integer to load or store.
All of this would be "fine", except that it forces LLVM to go through
integer math to combine and split up values. While this makes perfect
sense for integers (and in fact is critical for bitfields to end up
lowering efficiently) it is *terrible* for non-integer types, especially
floating point types. We have a much more canonical way of representing
the act of concatenating the bits of two SSA values in LLVM: a vector
and insertelement. This patch teaches InstCombine to use this
representation.
With this patch applied, LLVM will no longer introduce integer math into
the critical path of every loop over std::complex<float> operations such
as those that make up the hot path of ... oh, most HPC code, Eigen, and
any other heavy linear algebra library.
For the record, I looked *extensively* at fixing this in other parts of
the compiler, but it just doesn't work:
- We really do want to canonicalize memcpy and other bit-motion to
integer loads and stores. SSA values are tremendously more powerful
than "copy" intrinsics. Not doing this regresses massive amounts of
LLVM's scalar optimizer.
- We really do need to split up integer loads and stores of this form in
SROA or every memcpy of a trivially copyable struct will prevent SSA
formation of the members of that struct. It essentially turns off
SROA.
- The closest alternative is to actually split the loads and stores when
partitioning with SROA, but this has all of the downsides historically
discussed of splitting up loads and stores -- the wide-store
information is fundamentally lost. We would also see performance
regressions for bitfield-heavy code and other places where the
integers aren't really intended to be split without seemingly
arbitrary logic to treat integers totally differently.
- We *can* effectively fix this in instcombine, so it isn't that hard of
a choice to make IMO.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@223813 91177308-0d34-0410-b5e6-96231b3b80d8
2014-12-09 19:21:16 +00:00
|
|
|
V = BC->getOperand(0);
|
|
|
|
StoreInst *NewStore = IC.Builder->CreateAlignedStore(
|
|
|
|
V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
|
|
|
|
SI.getAlignment());
|
|
|
|
for (const auto &MDPair : MD) {
|
|
|
|
unsigned ID = MDPair.first;
|
|
|
|
MDNode *N = MDPair.second;
|
|
|
|
// Note, essentially every kind of metadata should be preserved here! This
|
|
|
|
// routine is supposed to clone a store instruction changing *only its
|
|
|
|
// type*. The only metadata it makes sense to drop is metadata which is
|
|
|
|
// invalidated when the pointer type changes. This should essentially
|
|
|
|
// never be the case in LLVM, but we explicitly switch over only known
|
|
|
|
// metadata to be conservatively correct. If you are adding metadata to
|
|
|
|
// LLVM which pertains to stores, you almost certainly want to add it
|
|
|
|
// here.
|
|
|
|
switch (ID) {
|
|
|
|
case LLVMContext::MD_dbg:
|
|
|
|
case LLVMContext::MD_tbaa:
|
|
|
|
case LLVMContext::MD_prof:
|
|
|
|
case LLVMContext::MD_fpmath:
|
|
|
|
case LLVMContext::MD_tbaa_struct:
|
|
|
|
case LLVMContext::MD_alias_scope:
|
|
|
|
case LLVMContext::MD_noalias:
|
|
|
|
case LLVMContext::MD_nontemporal:
|
|
|
|
case LLVMContext::MD_mem_parallel_loop_access:
|
|
|
|
case LLVMContext::MD_nonnull:
|
|
|
|
// All of these directly apply.
|
|
|
|
NewStore->setMetadata(ID, N);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LLVMContext::MD_invariant_load:
|
|
|
|
case LLVMContext::MD_range:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2014-11-25 10:09:51 +00:00
|
|
|
return true;
|
2010-01-05 05:57:49 +00:00
|
|
|
}
|
2013-04-05 21:20:12 +00:00
|
|
|
|
2014-11-25 10:09:51 +00:00
|
|
|
// FIXME: We should also canonicalize loads of vectors when their elements are
|
|
|
|
// cast to other types.
|
|
|
|
return false;
|
2010-01-05 05:57:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// equivalentAddressValues - Test if A and B will obviously have the same
|
|
|
|
/// value. This includes recognizing that %t0 and %t1 will have the same
|
|
|
|
/// value in code like this:
|
|
|
|
/// %t0 = getelementptr \@a, 0, 3
|
|
|
|
/// store i32 0, i32* %t0
|
|
|
|
/// %t1 = getelementptr \@a, 0, 3
|
|
|
|
/// %t2 = load i32* %t1
|
|
|
|
///
|
|
|
|
static bool equivalentAddressValues(Value *A, Value *B) {
|
|
|
|
// Test if the values are trivially equivalent.
|
|
|
|
if (A == B) return true;
|
2013-04-05 21:20:12 +00:00
|
|
|
|
2010-01-05 05:57:49 +00:00
|
|
|
// Test if the values come form identical arithmetic instructions.
|
|
|
|
// This uses isIdenticalToWhenDefined instead of isIdenticalTo because
|
|
|
|
// its only used to compare two uses within the same basic block, which
|
|
|
|
// means that they'll always either have the same value or one of them
|
|
|
|
// will have an undefined value.
|
|
|
|
if (isa<BinaryOperator>(A) ||
|
|
|
|
isa<CastInst>(A) ||
|
|
|
|
isa<PHINode>(A) ||
|
|
|
|
isa<GetElementPtrInst>(A))
|
|
|
|
if (Instruction *BI = dyn_cast<Instruction>(B))
|
|
|
|
if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
|
|
|
|
return true;
|
2013-04-05 21:20:12 +00:00
|
|
|
|
2010-01-05 05:57:49 +00:00
|
|
|
// Otherwise they may not be equivalent.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// visitStoreInst - Simplify a store instruction.  Returns a replacement
/// instruction when the store was transformed (often signalled by erasing
/// SI and returning it), or nullptr when no further combining applies.
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);   // The value being stored.
  Value *Ptr = SI.getOperand(1);   // The destination pointer.

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return EraseInstFromFunction(SI);

  // Attempt to improve the alignment.  Only possible when we have DataLayout
  // to tell us preferred/ABI alignments for the stored type.
  if (DL) {
    unsigned KnownAlign = getOrEnforceKnownAlignment(
        Ptr, DL->getPrefTypeAlignment(Val->getType()), DL, AC, &SI, DT);
    unsigned StoreAlign = SI.getAlignment();
    // Alignment 0 on a store means "ABI alignment of the stored type".
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      DL->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      // Make the implicit ABI alignment explicit.
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.  (The store is the alloca's only user, so the stored value
  // can never be observed.)
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    // Same reasoning through a GEP: the alloca's only use is the GEP and the
    // GEP's only use is this store.
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  // Scan backwards over at most 6 "interesting" instructions.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      // Re-credit the scan budget so skipped instructions are free.
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        // Step forward before erasing so BBI stays valid for the next
        // iteration's --BBI.
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      // A store to a different location blocks further scanning.
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  // (only in address space 0, where null is known non-dereferenceable).
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      // Replace the stored value with undef; the store itself is kept so
      // SimplifyCFG can turn it into unreachable.
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}
|
|
|
|
|
|
|
|
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
/// Returns true if SI (and the matching store in the other predecessor)
/// were replaced by a merged store in the successor block.
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // if so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  // Fewer than two predecessors: nothing to merge.
  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    // Both predecessors differ from StoreBB -> StoreBB doesn't feed DestBB
    // as expected (or there are duplicate non-StoreBB preds); bail out.
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  // More than two predecessors: bail out.
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop)
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info (and no-op pointer bitcasts).
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.  If both predecessors store the
  // same value, no PHI is necessary.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  // Clone SI's operation kind (volatility, alignment, ordering, scope) onto
  // the merged store; isSameOperationAs above guarantees OtherStore matches.
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}
|