Refactor the interface to InlineFunction so that most of the in/out
arguments are handled with a new InlineFunctionInfo class. This
makes it easier to extend InlineFunction to return more info in the
future.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@102137 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Chris Lattner 2010-04-22 23:07:58 +00:00
parent 9517144f53
commit 60915146f4
6 changed files with 64 additions and 47 deletions

View File

@ -19,6 +19,7 @@
#define LLVM_TRANSFORMS_UTILS_CLONING_H #define LLVM_TRANSFORMS_UTILS_CLONING_H
#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h" #include "llvm/ADT/Twine.h"
namespace llvm { namespace llvm {
@ -40,7 +41,6 @@ class TargetData;
class Loop; class Loop;
class LoopInfo; class LoopInfo;
class AllocaInst; class AllocaInst;
template <typename T> class SmallVectorImpl;
/// CloneModule - Return an exact copy of the specified module /// CloneModule - Return an exact copy of the specified module
/// ///
@ -158,6 +158,29 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
const TargetData *TD = 0, const TargetData *TD = 0,
Instruction *TheCall = 0); Instruction *TheCall = 0);
/// InlineFunctionInfo - This class captures the data input to the
/// InlineFunction call, and records the auxiliary results produced by it.
class InlineFunctionInfo {
public:
explicit InlineFunctionInfo(CallGraph *cg = 0, const TargetData *td = 0)
: CG(cg), TD(td) {}
/// CG - If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes.
CallGraph *CG;
/// TD - If non-null, target data used to compute alignments and type sizes
/// (e.g. for the memcpy emitted for by-value aggregate arguments).
const TargetData *TD;
/// StaticAllocas - InlineFunction fills this in with all static allocas that
/// get copied into the caller.
SmallVector<AllocaInst*, 4> StaticAllocas;
/// reset - Clear the per-call output state so this object can be reused
/// across multiple InlineFunction invocations (inputs CG/TD are kept).
void reset() {
StaticAllocas.clear();
}
};
/// InlineFunction - This function inlines the called function into the basic /// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline /// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs /// this call. The program is still in a well defined state if this occurs
@ -168,18 +191,9 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
/// exists in the instruction stream. Similiarly this will inline a recursive /// exists in the instruction stream. Similiarly this will inline a recursive
/// function by one level. /// function by one level.
/// ///
/// If a non-null callgraph pointer is provided, these functions update the bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI);
/// CallGraph to represent the program after inlining. bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI);
/// bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI);
/// If StaticAllocas is non-null, InlineFunction populates it with all of the
/// static allocas that it inlines into the caller.
///
bool InlineFunction(CallInst *C, CallGraph *CG = 0, const TargetData *TD = 0,
SmallVectorImpl<AllocaInst*> *StaticAllocas = 0);
bool InlineFunction(InvokeInst *II, CallGraph *CG = 0, const TargetData *TD = 0,
SmallVectorImpl<AllocaInst*> *StaticAllocas = 0);
bool InlineFunction(CallSite CS, CallGraph *CG = 0, const TargetData *TD = 0,
SmallVectorImpl<AllocaInst*> *StaticAllocas = 0);
} // End llvm namespace } // End llvm namespace

View File

@ -73,16 +73,14 @@ InlinedArrayAllocasTy;
/// available from other functions inlined into the caller. If we are able to /// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add /// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible. /// any new allocas to the set if not possible.
static bool InlineCallIfPossible(CallSite CS, CallGraph &CG, static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
const TargetData *TD,
InlinedArrayAllocasTy &InlinedArrayAllocas) { InlinedArrayAllocasTy &InlinedArrayAllocas) {
Function *Callee = CS.getCalledFunction(); Function *Callee = CS.getCalledFunction();
Function *Caller = CS.getCaller(); Function *Caller = CS.getCaller();
// Try to inline the function. Get the list of static allocas that were // Try to inline the function. Get the list of static allocas that were
// inlined. // inlined.
SmallVector<AllocaInst*, 16> StaticAllocas; if (!InlineFunction(CS, IFI))
if (!InlineFunction(CS, &CG, TD, &StaticAllocas))
return false; return false;
// If the inlined function had a higher stack protection level than the // If the inlined function had a higher stack protection level than the
@ -119,9 +117,9 @@ static bool InlineCallIfPossible(CallSite CS, CallGraph &CG,
// Loop over all the allocas we have so far and see if they can be merged with // Loop over all the allocas we have so far and see if they can be merged with
// a previously inlined alloca. If not, remember that we had it. // a previously inlined alloca. If not, remember that we had it.
for (unsigned AllocaNo = 0, e = StaticAllocas.size(); for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size();
AllocaNo != e; ++AllocaNo) { AllocaNo != e; ++AllocaNo) {
AllocaInst *AI = StaticAllocas[AllocaNo]; AllocaInst *AI = IFI.StaticAllocas[AllocaNo];
// Don't bother trying to merge array allocations (they will usually be // Don't bother trying to merge array allocations (they will usually be
// canonicalized to be an allocation *of* an array), or allocations whose // canonicalized to be an allocation *of* an array), or allocations whose
@ -347,6 +345,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
InlinedArrayAllocasTy InlinedArrayAllocas; InlinedArrayAllocasTy InlinedArrayAllocas;
InlineFunctionInfo InlineInfo(&CG, TD);
// Now that we have all of the call sites, loop over them and inline them if // Now that we have all of the call sites, loop over them and inline them if
// it looks profitable to do so. // it looks profitable to do so.
@ -385,7 +384,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
continue; continue;
// Attempt to inline the function... // Attempt to inline the function...
if (!InlineCallIfPossible(CS, CG, TD, InlinedArrayAllocas)) if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas))
continue; continue;
++NumInlined; ++NumInlined;

View File

@ -120,15 +120,17 @@ Function* PartialInliner::unswitchFunction(Function* F) {
// Extract the body of the if. // Extract the body of the if.
Function* extractedFunction = ExtractCodeRegion(DT, toExtract); Function* extractedFunction = ExtractCodeRegion(DT, toExtract);
InlineFunctionInfo IFI;
// Inline the top-level if test into all callers. // Inline the top-level if test into all callers.
std::vector<User*> Users(duplicateFunction->use_begin(), std::vector<User*> Users(duplicateFunction->use_begin(),
duplicateFunction->use_end()); duplicateFunction->use_end());
for (std::vector<User*>::iterator UI = Users.begin(), UE = Users.end(); for (std::vector<User*>::iterator UI = Users.begin(), UE = Users.end();
UI != UE; ++UI) UI != UE; ++UI)
if (CallInst* CI = dyn_cast<CallInst>(*UI)) if (CallInst *CI = dyn_cast<CallInst>(*UI))
InlineFunction(CI); InlineFunction(CI, IFI);
else if (InvokeInst* II = dyn_cast<InvokeInst>(*UI)) else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI))
InlineFunction(II); InlineFunction(II, IFI);
// Ditch the duplicate, since we're done with it, and rewrite all remaining // Ditch the duplicate, since we're done with it, and rewrite all remaining
// users (function pointers, etc.) back to the original function. // users (function pointers, etc.) back to the original function.

View File

@ -93,7 +93,8 @@ InlineHalfPowrs(const std::vector<Instruction *> &HalfPowrs,
// Inline the call, taking care of what code ends up where. // Inline the call, taking care of what code ends up where.
NewBlock = SplitBlock(NextInst->getParent(), NextInst, this); NewBlock = SplitBlock(NextInst->getParent(), NextInst, this);
bool B = InlineFunction(Call, 0, TD); InlineFunctionInfo IFI(0, TD);
bool B = InlineFunction(Call, IFI);
assert(B && "half_powr didn't inline?"); B=B; assert(B && "half_powr didn't inline?"); B=B;
BasicBlock *NewBody = NewBlock->getSinglePredecessor(); BasicBlock *NewBody = NewBlock->getSinglePredecessor();

View File

@ -129,7 +129,8 @@ void BasicInlinerImpl::inlineFunctions() {
} }
// Inline // Inline
if (InlineFunction(CS, NULL, TD)) { InlineFunctionInfo IFI(0, TD);
if (InlineFunction(CS, IFI)) {
if (Callee->use_empty() && (Callee->hasLocalLinkage() || if (Callee->use_empty() && (Callee->hasLocalLinkage() ||
Callee->hasAvailableExternallyLinkage())) Callee->hasAvailableExternallyLinkage()))
DeadFunctions.insert(Callee); DeadFunctions.insert(Callee);

View File

@ -28,13 +28,11 @@
#include "llvm/Support/CallSite.h" #include "llvm/Support/CallSite.h"
using namespace llvm; using namespace llvm;
bool llvm::InlineFunction(CallInst *CI, CallGraph *CG, const TargetData *TD, bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
SmallVectorImpl<AllocaInst*> *StaticAllocas) { return InlineFunction(CallSite(CI), IFI);
return InlineFunction(CallSite(CI), CG, TD, StaticAllocas);
} }
bool llvm::InlineFunction(InvokeInst *II, CallGraph *CG, const TargetData *TD, bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
SmallVectorImpl<AllocaInst*> *StaticAllocas) { return InlineFunction(CallSite(II), IFI);
return InlineFunction(CallSite(II), CG, TD, StaticAllocas);
} }
@ -232,13 +230,15 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
// exists in the instruction stream. Similiarly this will inline a recursive // exists in the instruction stream. Similiarly this will inline a recursive
// function by one level. // function by one level.
// //
bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD, bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
SmallVectorImpl<AllocaInst*> *StaticAllocas) {
Instruction *TheCall = CS.getInstruction(); Instruction *TheCall = CS.getInstruction();
LLVMContext &Context = TheCall->getContext(); LLVMContext &Context = TheCall->getContext();
assert(TheCall->getParent() && TheCall->getParent()->getParent() && assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
"Instruction not in function!"); "Instruction not in function!");
// If IFI has any state in it, zap it before we fill it in.
IFI.reset();
const Function *CalledFunc = CS.getCalledFunction(); const Function *CalledFunc = CS.getCalledFunction();
if (CalledFunc == 0 || // Can't inline external function or indirect if (CalledFunc == 0 || // Can't inline external function or indirect
CalledFunc->isDeclaration() || // call, or call to a vararg function! CalledFunc->isDeclaration() || // call, or call to a vararg function!
@ -305,7 +305,7 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
// Create the alloca. If we have TargetData, use nice alignment. // Create the alloca. If we have TargetData, use nice alignment.
unsigned Align = 1; unsigned Align = 1;
if (TD) Align = TD->getPrefTypeAlignment(AggTy); if (IFI.TD) Align = IFI.TD->getPrefTypeAlignment(AggTy);
Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Value *NewAlloca = new AllocaInst(AggTy, 0, Align,
I->getName(), I->getName(),
&*Caller->begin()->begin()); &*Caller->begin()->begin());
@ -318,11 +318,11 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall); Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);
Value *Size; Value *Size;
if (TD == 0) if (IFI.TD == 0)
Size = ConstantExpr::getSizeOf(AggTy); Size = ConstantExpr::getSizeOf(AggTy);
else else
Size = ConstantInt::get(Type::getInt64Ty(Context), Size = ConstantInt::get(Type::getInt64Ty(Context),
TD->getTypeStoreSize(AggTy)); IFI.TD->getTypeStoreSize(AggTy));
// Always generate a memcpy of alignment 1 here because we don't know // Always generate a memcpy of alignment 1 here because we don't know
// the alignment of the src pointer. Other optimizations can infer // the alignment of the src pointer. Other optimizations can infer
@ -336,7 +336,7 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall); CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);
// If we have a call graph, update it. // If we have a call graph, update it.
if (CG) { if (CallGraph *CG = IFI.CG) {
CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn); CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
CallGraphNode *CallerNode = (*CG)[Caller]; CallGraphNode *CallerNode = (*CG)[Caller];
CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN); CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
@ -355,14 +355,14 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
// (which can happen, e.g., because an argument was constant), but we'll be // (which can happen, e.g., because an argument was constant), but we'll be
// happy with whatever the cloner can do. // happy with whatever the cloner can do.
CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i", CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
&InlinedFunctionInfo, TD, TheCall); &InlinedFunctionInfo, IFI.TD, TheCall);
// Remember the first block that is newly cloned over. // Remember the first block that is newly cloned over.
FirstNewBlock = LastBlock; ++FirstNewBlock; FirstNewBlock = LastBlock; ++FirstNewBlock;
// Update the callgraph if requested. // Update the callgraph if requested.
if (CG) if (IFI.CG)
UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, *CG); UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, *IFI.CG);
} }
// If there are any alloca instructions in the block that used to be the entry // If there are any alloca instructions in the block that used to be the entry
@ -389,13 +389,13 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
// Keep track of the static allocas that we inline into the caller if the // Keep track of the static allocas that we inline into the caller if the
// StaticAllocas pointer is non-null. // StaticAllocas pointer is non-null.
if (StaticAllocas) StaticAllocas->push_back(AI); IFI.StaticAllocas.push_back(AI);
// Scan for the block of allocas that we can move over, and move them // Scan for the block of allocas that we can move over, and move them
// all at once. // all at once.
while (isa<AllocaInst>(I) && while (isa<AllocaInst>(I) &&
isa<Constant>(cast<AllocaInst>(I)->getArraySize())) { isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
if (StaticAllocas) StaticAllocas->push_back(cast<AllocaInst>(I)); IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
++I; ++I;
} }
@ -419,7 +419,7 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
// If we are preserving the callgraph, add edges to the stacksave/restore // If we are preserving the callgraph, add edges to the stacksave/restore
// functions for the calls we insert. // functions for the calls we insert.
CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0; CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
if (CG) { if (CallGraph *CG = IFI.CG) {
StackSaveCGN = CG->getOrInsertFunction(StackSave); StackSaveCGN = CG->getOrInsertFunction(StackSave);
StackRestoreCGN = CG->getOrInsertFunction(StackRestore); StackRestoreCGN = CG->getOrInsertFunction(StackRestore);
CallerNode = (*CG)[Caller]; CallerNode = (*CG)[Caller];
@ -428,13 +428,13 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
// Insert the llvm.stacksave. // Insert the llvm.stacksave.
CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack", CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
FirstNewBlock->begin()); FirstNewBlock->begin());
if (CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN); if (IFI.CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);
// Insert a call to llvm.stackrestore before any return instructions in the // Insert a call to llvm.stackrestore before any return instructions in the
// inlined function. // inlined function.
for (unsigned i = 0, e = Returns.size(); i != e; ++i) { for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]); CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN); if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
} }
// Count the number of StackRestore calls we insert. // Count the number of StackRestore calls we insert.
@ -447,7 +447,7 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
BB != E; ++BB) BB != E; ++BB)
if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) { if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", UI); CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", UI);
if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN); if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
++NumStackRestores; ++NumStackRestores;
} }
} }