Replace the core comparison logic in merge functions. We can now merge

vector<>::push_back() in:

  void foo(vector<int> &a, vector<unsigned> &b) {
    a.push_back(10);
    b.push_back(11);
  }

to two calls to the same push_back function, or fold away the two copies of
push_back() in:

  struct T { int i; };
  struct S { char c; };
  vector<T*> t;
  vector<S*> s;
  void f(T *x) { t.push_back(x); }
  void g(S *x) { s.push_back(x); }

but leave f() and g() separate, since they refer to two different global
variables.
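
For illustration only, the folded result is roughly equivalent to the
hand-written sketch below (the helper name is hypothetical; the pass
performs this rewrite on LLVM IR, turning the redundant copy into an
alias or thunk to the surviving one rather than rewriting any source):

  // Hypothetical sketch: one surviving push_back body serves both
  // vector<T*> and vector<S*>, since the generated code never cares
  // which pointee type sits behind the pointer.
  void push_back_impl(void *vec, void *elt);  // shared, internal copy

  // f() and g() stay separate because they touch different globals,
  // but both now funnel into the same push_back body.
  void f(T *x) { push_back_impl(&t, x); }
  void g(S *x) { push_back_impl(&s, x); }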



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@103698 91177308-0d34-0410-b5e6-96231b3b80d8
Nick Lewycky 2010-05-13 05:48:45 +00:00

@@ -17,32 +17,55 @@
// important that the hash function be high quality. The equality comparison
// iterates through each instruction in each basic block.
//
// When a match is found, the functions are folded. We can only fold two
// functions when we know that the definition of one of them is not
// overridable.
// When a match is found the functions are folded. If both functions are
// overridable, we move the functionality into a new internal function and
// leave two overridable thunks to it.
//
//===----------------------------------------------------------------------===//
//
// Future work:
//
// * fold vector<T*>::push_back and vector<S*>::push_back.
//
// These two functions have different types, but in a way that doesn't matter
// to us. As long as we never see an S or T itself, using S* and S** is the
// same as using a T* and T**.
//
// * virtual functions.
//
// Many functions have their address taken by the virtual function table for
// the object they belong to. However, as long as it's only used for a lookup
// and call, this is irrelevant, and we'd like to fold such implementations.
//
// * use SCC to cut down on pair-wise comparisons and solve larger cycles.
//
// The current implementation loops over a pair-wise comparison of all
// functions in the program, where the two functions in the pair are assumed
// to be equal until proven otherwise. We could both use fewer
// comparisons and optimize more complex cases if we used strongly connected
// components of the call graph.
//
// * be smarter about bitcast.
//
// In order to fold functions, we will sometimes add either bitcast instructions
// or bitcast constant expressions. Unfortunately, this can confound further
// analysis since the two functions differ where one has a bitcast and the
// other doesn't. We should learn to peer through bitcasts without imposing bad
// performance properties.
//
// * don't emit aliases for Mach-O.
//
// Mach-O doesn't support aliases which means that we must avoid introducing
// them in the bitcode on architectures which don't support them, such as
// Mac OS X. There are a few approaches to this problem:
// a) teach codegen to lower global aliases to thunks on platforms which don't
// support them.
// b) always emit thunks, and create a separate thunk-to-alias pass which
// runs on ELF systems. This has the added benefit of transforming other
// thunks such as those produced by a C++ frontend into aliases when legal
// to do so.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "mergefunc"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Constants.h"
#include "llvm/InlineAsm.h"
@@ -54,6 +77,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <map>
#include <vector>
using namespace llvm;
@@ -61,17 +85,33 @@ using namespace llvm;
STATISTIC(NumFunctionsMerged, "Number of functions merged");
namespace {
struct MergeFunctions : public ModulePass {
class MergeFunctions : public ModulePass {
public:
static char ID; // Pass identification, replacement for typeid
MergeFunctions() : ModulePass(&ID) {}
bool runOnModule(Module &M);
private:
bool isEquivalentGEP(const GetElementPtrInst *GEP1,
const GetElementPtrInst *GEP2);
bool equals(const BasicBlock *BB1, const BasicBlock *BB2);
bool equals(const Function *F, const Function *G);
bool compare(const Value *V1, const Value *V2);
const Function *LHS, *RHS;
typedef DenseMap<const Value *, unsigned long> IDMap;
IDMap Map;
DenseMap<const Function *, IDMap> Domains;
DenseMap<const Function *, unsigned long> DomainCount;
TargetData *TD;
};
}
char MergeFunctions::ID = 0;
static RegisterPass<MergeFunctions>
X("mergefunc", "Merge Functions");
static RegisterPass<MergeFunctions> X("mergefunc", "Merge Functions");
ModulePass *llvm::createMergeFunctionsPass() {
return new MergeFunctions();
@@ -95,15 +135,6 @@ static unsigned long hash(const Function *F) {
return ID.ComputeHash();
}
/// IgnoreBitcasts - given a bitcast, returns the first non-bitcast found by
/// walking the chain of cast operands. Otherwise, returns the argument.
static Value* IgnoreBitcasts(Value *V) {
while (BitCastInst *BC = dyn_cast<BitCastInst>(V))
V = BC->getOperand(0);
return V;
}
/// isEquivalentType - any two pointers are equivalent. Otherwise, standard
/// type equivalence rules apply.
static bool isEquivalentType(const Type *Ty1, const Type *Ty2) {
@@ -113,6 +144,14 @@ static bool isEquivalentType(const Type *Ty1, const Type *Ty2) {
return false;
switch(Ty1->getTypeID()) {
default:
llvm_unreachable("Unknown type!");
// Fall through in Release-Asserts mode.
case Type::IntegerTyID:
case Type::OpaqueTyID:
// Ty1 == Ty2 would have returned true earlier.
return false;
case Type::VoidTyID:
case Type::FloatTyID:
case Type::DoubleTyID:
@@ -123,15 +162,6 @@ static bool isEquivalentType(const Type *Ty1, const Type *Ty2) {
case Type::MetadataTyID:
return true;
case Type::IntegerTyID:
case Type::OpaqueTyID:
// Ty1 == Ty2 would have returned true earlier.
return false;
default:
llvm_unreachable("Unknown type!");
return false;
case Type::PointerTyID: {
const PointerType *PTy1 = cast<PointerType>(Ty1);
const PointerType *PTy2 = cast<PointerType>(Ty2);
@@ -154,6 +184,21 @@ static bool isEquivalentType(const Type *Ty1, const Type *Ty2) {
return true;
}
case Type::UnionTyID: {
const UnionType *UTy1 = cast<UnionType>(Ty1);
const UnionType *UTy2 = cast<UnionType>(Ty2);
// TODO: we could be fancy with union(A, union(A, B)) === union(A, B), etc.
if (UTy1->getNumElements() != UTy2->getNumElements())
return false;
for (unsigned i = 0, e = UTy1->getNumElements(); i != e; ++i) {
if (!isEquivalentType(UTy1->getElementType(i), UTy2->getElementType(i)))
return false;
}
return true;
}
case Type::FunctionTyID: {
const FunctionType *FTy1 = cast<FunctionType>(Ty1);
const FunctionType *FTy2 = cast<FunctionType>(Ty2);
@@ -236,123 +281,136 @@ isEquivalentOperation(const Instruction *I1, const Instruction *I2) {
return true;
}
static bool compare(const Value *V, const Value *U) {
assert(!isa<BasicBlock>(V) && !isa<BasicBlock>(U) &&
"Must not compare basic blocks.");
assert(isEquivalentType(V->getType(), U->getType()) &&
"Two of the same operation have operands of different type.");
// TODO: If the constant is an expression of F, we should accept that it's
// equal to the same expression in terms of G.
if (isa<Constant>(V))
return V == U;
// The caller has ensured that ValueMap[V] != U. Since Arguments are
// pre-loaded into the ValueMap, and Instructions are added as we go, we know
// that this can only be a mis-match.
if (isa<Instruction>(V) || isa<Argument>(V))
return false;
if (isa<InlineAsm>(V) && isa<InlineAsm>(U)) {
const InlineAsm *IAF = cast<InlineAsm>(V);
const InlineAsm *IAG = cast<InlineAsm>(U);
return IAF->getAsmString() == IAG->getAsmString() &&
IAF->getConstraintString() == IAG->getConstraintString();
bool MergeFunctions::isEquivalentGEP(const GetElementPtrInst *GEP1,
const GetElementPtrInst *GEP2) {
if (TD && GEP1->hasAllConstantIndices() && GEP2->hasAllConstantIndices()) {
SmallVector<Value *, 8> Indices1, Indices2;
for (GetElementPtrInst::const_op_iterator I = GEP1->idx_begin(),
E = GEP1->idx_end(); I != E; ++I) {
Indices1.push_back(*I);
}
for (GetElementPtrInst::const_op_iterator I = GEP2->idx_begin(),
E = GEP2->idx_end(); I != E; ++I) {
Indices2.push_back(*I);
}
uint64_t Offset1 = TD->getIndexedOffset(GEP1->getPointerOperandType(),
Indices1.data(), Indices1.size());
uint64_t Offset2 = TD->getIndexedOffset(GEP2->getPointerOperandType(),
Indices2.data(), Indices2.size());
return Offset1 == Offset2;
}
return false;
// Equivalent types aren't enough.
if (GEP1->getPointerOperand()->getType() !=
GEP2->getPointerOperand()->getType())
return false;
if (GEP1->getNumOperands() != GEP2->getNumOperands())
return false;
for (unsigned i = 0, e = GEP1->getNumOperands(); i != e; ++i) {
if (!compare(GEP1->getOperand(i), GEP2->getOperand(i)))
return false;
}
return true;
}
static bool equals(const BasicBlock *BB1, const BasicBlock *BB2,
DenseMap<const Value *, const Value *> &ValueMap,
DenseMap<const Value *, const Value *> &SpeculationMap) {
// Speculatively add it anyways. If it's false, we'll notice a difference
// later, and this won't matter.
ValueMap[BB1] = BB2;
bool MergeFunctions::compare(const Value *V1, const Value *V2) {
if (V1 == LHS || V1 == RHS)
if (V2 == LHS || V2 == RHS)
return true;
// TODO: constant expressions in terms of LHS and RHS
if (isa<Constant>(V1))
return V1 == V2;
if (isa<InlineAsm>(V1) && isa<InlineAsm>(V2)) {
const InlineAsm *IA1 = cast<InlineAsm>(V1);
const InlineAsm *IA2 = cast<InlineAsm>(V2);
return IA1->getAsmString() == IA2->getAsmString() &&
IA1->getConstraintString() == IA2->getConstraintString();
}
// We enumerate constants globally and arguments, basic blocks or
// instructions within the function they belong to.
const Function *Domain1 = NULL;
if (const Argument *A = dyn_cast<Argument>(V1)) {
Domain1 = A->getParent();
} else if (const BasicBlock *BB = dyn_cast<BasicBlock>(V1)) {
Domain1 = BB->getParent();
} else if (const Instruction *I = dyn_cast<Instruction>(V1)) {
Domain1 = I->getParent()->getParent();
}
const Function *Domain2 = NULL;
if (const Argument *A = dyn_cast<Argument>(V2)) {
Domain2 = A->getParent();
} else if (const BasicBlock *BB = dyn_cast<BasicBlock>(V2)) {
Domain2 = BB->getParent();
} else if (const Instruction *I = dyn_cast<Instruction>(V2)) {
Domain2 = I->getParent()->getParent();
}
if (Domain1 != Domain2)
if (Domain1 != LHS && Domain1 != RHS)
if (Domain2 != LHS && Domain2 != RHS)
return false;
IDMap &Map1 = Domains[Domain1];
unsigned long &ID1 = Map1[V1];
if (!ID1)
ID1 = ++DomainCount[Domain1];
IDMap &Map2 = Domains[Domain2];
unsigned long &ID2 = Map2[V2];
if (!ID2)
ID2 = ++DomainCount[Domain2];
return ID1 == ID2;
}
bool MergeFunctions::equals(const BasicBlock *BB1, const BasicBlock *BB2) {
BasicBlock::const_iterator FI = BB1->begin(), FE = BB1->end();
BasicBlock::const_iterator GI = BB2->begin(), GE = BB2->end();
do {
if (isa<BitCastInst>(FI)) {
++FI;
continue;
}
if (isa<BitCastInst>(GI)) {
++GI;
continue;
}
if (!isEquivalentOperation(FI, GI))
if (!compare(FI, GI))
return false;
if (isa<GetElementPtrInst>(FI)) {
const GetElementPtrInst *GEPF = cast<GetElementPtrInst>(FI);
const GetElementPtrInst *GEPG = cast<GetElementPtrInst>(GI);
if (GEPF->hasAllZeroIndices() && GEPG->hasAllZeroIndices()) {
// It's effectively a bitcast.
++FI, ++GI;
continue;
}
if (isa<GetElementPtrInst>(FI) && isa<GetElementPtrInst>(GI)) {
const GetElementPtrInst *GEP1 = cast<GetElementPtrInst>(FI);
const GetElementPtrInst *GEP2 = cast<GetElementPtrInst>(GI);
// TODO: we only really care about the elements before the index
if (FI->getOperand(0)->getType() != GI->getOperand(0)->getType())
return false;
}
if (!compare(GEP1->getPointerOperand(), GEP2->getPointerOperand()))
return false;
if (ValueMap[FI] == GI) {
++FI, ++GI;
continue;
}
if (ValueMap[FI] != NULL)
return false;
for (unsigned i = 0, e = FI->getNumOperands(); i != e; ++i) {
Value *OpF = IgnoreBitcasts(FI->getOperand(i));
Value *OpG = IgnoreBitcasts(GI->getOperand(i));
if (ValueMap[OpF] == OpG)
continue;
if (ValueMap[OpF] != NULL)
if (!isEquivalentGEP(GEP1, GEP2))
return false;
} else {
if (!isEquivalentOperation(FI, GI))
return false;
if (OpF->getValueID() != OpG->getValueID() ||
!isEquivalentType(OpF->getType(), OpG->getType()))
return false;
for (unsigned i = 0, e = FI->getNumOperands(); i != e; ++i) {
Value *OpF = FI->getOperand(i);
Value *OpG = GI->getOperand(i);
if (isa<PHINode>(FI)) {
if (SpeculationMap[OpF] == NULL)
SpeculationMap[OpF] = OpG;
else if (SpeculationMap[OpF] != OpG)
return false;
continue;
} else if (isa<BasicBlock>(OpF)) {
assert(isa<TerminatorInst>(FI) &&
"BasicBlock referenced by non-Terminator non-PHI");
// This call changes the ValueMap, hence we can't use
// Value *& = ValueMap[...]
if (!equals(cast<BasicBlock>(OpF), cast<BasicBlock>(OpG), ValueMap,
SpeculationMap))
return false;
} else {
if (!compare(OpF, OpG))
if (!compare(OpF, OpG))
return false;
if (OpF->getValueID() != OpG->getValueID() ||
!isEquivalentType(OpF->getType(), OpG->getType()))
return false;
}
ValueMap[OpF] = OpG;
}
ValueMap[FI] = GI;
++FI, ++GI;
} while (FI != FE && GI != GE);
return FI == FE && GI == GE;
}
static bool equals(const Function *F, const Function *G) {
bool MergeFunctions::equals(const Function *F, const Function *G) {
// We need to recheck everything, but check the things that weren't included
// in the hash first.
@@ -382,27 +440,46 @@ static bool equals(const Function *F, const Function *G) {
if (!isEquivalentType(F->getFunctionType(), G->getFunctionType()))
return false;
DenseMap<const Value *, const Value *> ValueMap;
DenseMap<const Value *, const Value *> SpeculationMap;
ValueMap[F] = G;
assert(F->arg_size() == G->arg_size() &&
"Identical functions have a different number of args.");
LHS = F;
RHS = G;
// Visit the arguments so that they get enumerated in the order they're
// passed in.
for (Function::const_arg_iterator fi = F->arg_begin(), gi = G->arg_begin(),
fe = F->arg_end(); fi != fe; ++fi, ++gi)
ValueMap[fi] = gi;
if (!equals(&F->getEntryBlock(), &G->getEntryBlock(), ValueMap,
SpeculationMap))
return false;
for (DenseMap<const Value *, const Value *>::iterator
I = SpeculationMap.begin(), E = SpeculationMap.end(); I != E; ++I) {
if (ValueMap[I->first] != I->second)
return false;
fe = F->arg_end(); fi != fe; ++fi, ++gi) {
if (!compare(fi, gi))
llvm_unreachable("Arguments repeat");
}
SmallVector<const BasicBlock *, 8> FBBs, GBBs;
SmallSet<const BasicBlock *, 128> VisitedBBs; // in terms of F.
FBBs.push_back(&F->getEntryBlock());
GBBs.push_back(&G->getEntryBlock());
VisitedBBs.insert(FBBs[0]);
while (!FBBs.empty()) {
const BasicBlock *FBB = FBBs.pop_back_val();
const BasicBlock *GBB = GBBs.pop_back_val();
if (!compare(FBB, GBB) || !equals(FBB, GBB)) {
Domains.clear();
DomainCount.clear();
return false;
}
const TerminatorInst *FTI = FBB->getTerminator();
const TerminatorInst *GTI = GBB->getTerminator();
assert(FTI->getNumSuccessors() == GTI->getNumSuccessors());
for (unsigned i = 0, e = FTI->getNumSuccessors(); i != e; ++i) {
if (!VisitedBBs.insert(FTI->getSuccessor(i)))
continue;
FBBs.push_back(FTI->getSuccessor(i));
GBBs.push_back(GTI->getSuccessor(i));
}
}
Domains.clear();
DomainCount.clear();
return true;
}
@@ -476,20 +553,32 @@ static LinkageCategory categorize(const Function *F) {
}
static void ThunkGToF(Function *F, Function *G) {
if (!G->mayBeOverridden()) {
// Redirect direct callers of G to F.
Constant *BitcastF = ConstantExpr::getBitCast(F, G->getType());
for (Value::use_iterator UI = G->use_begin(), UE = G->use_end();
UI != UE;) {
Value::use_iterator TheIter = UI;
++UI;
CallSite CS(*TheIter);
if (CS && CS.isCallee(TheIter))
TheIter.getUse().set(BitcastF);
}
}
Function *NewG = Function::Create(G->getFunctionType(), G->getLinkage(), "",
G->getParent());
BasicBlock *BB = BasicBlock::Create(F->getContext(), "", NewG);
std::vector<Value *> Args;
SmallVector<Value *, 16> Args;
unsigned i = 0;
const FunctionType *FFTy = F->getFunctionType();
for (Function::arg_iterator AI = NewG->arg_begin(), AE = NewG->arg_end();
AI != AE; ++AI) {
if (FFTy->getParamType(i) == AI->getType())
if (FFTy->getParamType(i) == AI->getType()) {
Args.push_back(AI);
else {
Value *BCI = new BitCastInst(AI, FFTy->getParamType(i), "", BB);
Args.push_back(BCI);
} else {
Args.push_back(new BitCastInst(AI, FFTy->getParamType(i), "", BB));
}
++i;
}
@@ -510,8 +599,6 @@ static void ThunkGToF(Function *F, Function *G) {
NewG->takeName(G);
G->replaceAllUsesWith(NewG);
G->eraseFromParent();
// TODO: look at direct callers to G and make them all direct callers to F.
}
static void AliasGToF(Function *F, Function *G) {
@@ -542,67 +629,66 @@ static bool fold(std::vector<Function *> &FnVec, unsigned i, unsigned j) {
}
switch (catF) {
case ExternalStrong:
switch (catG) {
case ExternalStrong:
switch (catG) {
case ExternalStrong:
case ExternalWeak:
ThunkGToF(F, G);
break;
case Internal:
if (G->hasAddressTaken())
ThunkGToF(F, G);
else
AliasGToF(F, G);
break;
}
break;
case ExternalWeak: {
assert(catG == ExternalWeak);
// Make them both thunks to the same internal function.
F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
F->getParent());
H->copyAttributesFrom(F);
H->takeName(F);
F->replaceAllUsesWith(H);
case ExternalWeak:
ThunkGToF(F, G);
ThunkGToF(F, H);
F->setLinkage(GlobalValue::InternalLinkage);
} break;
case Internal:
switch (catG) {
case ExternalStrong:
llvm_unreachable(0);
// fall-through
case ExternalWeak:
if (F->hasAddressTaken())
ThunkGToF(F, G);
else
AliasGToF(F, G);
break;
case Internal: {
bool addrTakenF = F->hasAddressTaken();
bool addrTakenG = G->hasAddressTaken();
if (!addrTakenF && addrTakenG) {
std::swap(FnVec[i], FnVec[j]);
std::swap(F, G);
std::swap(addrTakenF, addrTakenG);
}
if (addrTakenF && addrTakenG) {
ThunkGToF(F, G);
} else {
assert(!addrTakenG);
AliasGToF(F, G);
}
} break;
}
break;
case Internal:
if (G->hasAddressTaken())
ThunkGToF(F, G);
else
AliasGToF(F, G);
break;
}
break;
case ExternalWeak: {
assert(catG == ExternalWeak);
// Make them both thunks to the same internal function.
F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
F->getParent());
H->copyAttributesFrom(F);
H->takeName(F);
F->replaceAllUsesWith(H);
ThunkGToF(F, G);
ThunkGToF(F, H);
F->setLinkage(GlobalValue::InternalLinkage);
} break;
case Internal:
switch (catG) {
case ExternalStrong:
llvm_unreachable(0);
// fall-through
case ExternalWeak:
if (F->hasAddressTaken())
ThunkGToF(F, G);
else
AliasGToF(F, G);
break;
case Internal: {
bool addrTakenF = F->hasAddressTaken();
bool addrTakenG = G->hasAddressTaken();
if (!addrTakenF && addrTakenG) {
std::swap(FnVec[i], FnVec[j]);
std::swap(F, G);
std::swap(addrTakenF, addrTakenG);
}
if (addrTakenF && addrTakenG) {
ThunkGToF(F, G);
} else {
assert(!addrTakenG);
AliasGToF(F, G);
}
} break;
} break;
}
++NumFunctionsMerged;
@@ -619,22 +705,20 @@ bool MergeFunctions::runOnModule(Module &M) {
std::map<unsigned long, std::vector<Function *> > FnMap;
for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
if (F->isDeclaration() || F->isIntrinsic())
if (F->isDeclaration())
continue;
FnMap[hash(F)].push_back(F);
}
// TODO: instead of running in a loop, we could also fold functions in
// callgraph order. Constructing the CFG probably isn't cheaper than just
// running in a loop, unless it happened to already be available.
TD = getAnalysisIfAvailable<TargetData>();
bool LocalChanged;
do {
LocalChanged = false;
DEBUG(dbgs() << "size: " << FnMap.size() << "\n");
for (std::map<unsigned long, std::vector<Function *> >::iterator
I = FnMap.begin(), E = FnMap.end(); I != E; ++I) {
I = FnMap.begin(), E = FnMap.end(); I != E; ++I) {
std::vector<Function *> &FnVec = I->second;
DEBUG(dbgs() << "hash (" << I->first << "): " << FnVec.size() << "\n");