Remove this code as it is currently completely broken and unmaintained.

If needed, this can be resurrected from CVS.

Note that several of the interfaces (e.g. the IPModRef ones) are subsumed
by generic AliasAnalysis interfaces that have been written since this code
was developed (and they are not DSA specific).
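
For context, a minimal sketch of the kind of generic AliasAnalysis query that
covers the IPModRef use case. The getModRefInfo(CallSite, Value*, unsigned)
signature and include paths are assumptions about the current AliasAnalysis
interface, not something taken from the removed code:

// Illustrative only: ask whether a call may modify a given pointer through
// the generic AliasAnalysis interface (no DSA required).
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Instructions.h"
#include "llvm/Support/CallSite.h"

namespace llvm {
static bool callMayModify(AliasAnalysis &AA, CallInst &CI,
                          Value *Ptr, unsigned Size) {
  return AA.getModRefInfo(CallSite(&CI), Ptr, Size) & AliasAnalysis::Mod;
}
} // End llvm namespace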


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19864 91177308-0d34-0410-b5e6-96231b3b80d8
Chris Lattner 2005-01-28 06:12:46 +00:00
parent d19d89a04c
commit 9cb992ab72
9 changed files with 0 additions and 2681 deletions


@@ -1,87 +0,0 @@
//===- DependenceGraph.cpp - Dependence graph for a function ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an explicit representation for the dependence graph
// of a function, with one node per instruction and one edge per dependence.
// Dependences include both data and control dependences.
//
// Each dep. graph node (class DepGraphNode) keeps lists of incoming and
// outgoing dependence edges.
//
// Each dep. graph edge (class Dependence) keeps a pointer to one end-point
// of the dependence. This saves space and is important because dep. graphs
// can grow quickly. It works just fine because the standard idiom is to
// start with a known node and enumerate the dependences to or from that node.
//===----------------------------------------------------------------------===//
#include "DependenceGraph.h"
#include "llvm/Function.h"
namespace llvm {
//----------------------------------------------------------------------------
// class Dependence:
//
// A representation of a simple (non-loop-related) dependence
//----------------------------------------------------------------------------
void Dependence::print(std::ostream &O) const
{
assert(depType != NoDependence && "This dependence should never be created!");
switch (depType) {
case TrueDependence: O << "TRUE dependence"; break;
case AntiDependence: O << "ANTI dependence"; break;
case OutputDependence: O << "OUTPUT dependence"; break;
case ControlDependence: O << "CONTROL dependence"; break;
default: assert(0 && "Invalid dependence type"); break;
}
}
//----------------------------------------------------------------------------
// class DepGraphNode
//----------------------------------------------------------------------------
void DepGraphNode::print(std::ostream &O) const
{
const_iterator DI = outDepBegin(), DE = outDepEnd();
O << "\nDeps. from instr:" << getInstr();
for ( ; DI != DE; ++DI)
{
O << "\t";
DI->print(O);
O << " to instruction:";
O << DI->getSink()->getInstr();
}
}
//----------------------------------------------------------------------------
// class DependenceGraph
//----------------------------------------------------------------------------
DependenceGraph::~DependenceGraph()
{
// Free all DepGraphNode objects created for this graph
for (map_iterator I = depNodeMap.begin(), E = depNodeMap.end(); I != E; ++I)
delete I->second;
}
void DependenceGraph::print(const Function& func, std::ostream &O) const
{
O << "DEPENDENCE GRAPH FOR FUNCTION " << func.getName() << ":\n";
for (Function::const_iterator BB=func.begin(), FE=func.end(); BB != FE; ++BB)
for (BasicBlock::const_iterator II=BB->begin(), IE=BB->end(); II !=IE; ++II)
if (const DepGraphNode* dgNode = this->getNode(*II))
dgNode->print(O);
}
} // End llvm namespace
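
An illustrative sketch (not part of the removed sources) of the write-side
interface implemented above: a hypothetical client records a flow dependence
between two conflicting instructions and dumps the resulting graph.

// Hypothetical helper; uses only the DependenceGraph interface shown above.
#include "DependenceGraph.h"
#include "llvm/Function.h"
#include <iostream>

namespace llvm {
static void recordStoreLoadDep(Function &F, Instruction &Store,
                               Instruction &Load) {
  DependenceGraph DG;
  // Store writes memory that Load later reads: a TRUE (flow) dependence.
  DG.AddSimpleDependence(Store, Load, TrueDependence);
  DG.print(F, std::cerr);  // one entry per node, listing its outgoing edges
}
} // End llvm namespace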


@@ -1,255 +0,0 @@
//===- DependenceGraph.h - Dependence graph for a function ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides an explicit representation for the dependence graph
// of a function, with one node per instruction and one edge per dependence.
// Dependences include both data and control dependences.
//
// Each dep. graph node (class DepGraphNode) keeps lists of incoming and
// outgoing dependence edges.
//
// Each dep. graph edge (class Dependence) keeps a pointer to one end-point
// of the dependence. This saves space and is important because dep. graphs
// can grow quickly. It works just fine because the standard idiom is to
// start with a known node and enumerate the dependences to or from that node.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_DEPENDENCEGRAPH_H
#define LLVM_ANALYSIS_DEPENDENCEGRAPH_H
#include "llvm/ADT/hash_map"
#include <cassert>
#include <iosfwd>
#include <utility>
#include <vector>
namespace llvm {
class Instruction;
class Function;
class Dependence;
class DepGraphNode;
class DependenceGraph;
//----------------------------------------------------------------------------
/// enum DependenceType - The standard data dependence types
///
enum DependenceType {
NoDependence = 0x0,
TrueDependence = 0x1,
AntiDependence = 0x2,
OutputDependence = 0x4,
ControlDependence = 0x8, // from a terminator to some other instr.
IncomingFlag = 0x10 // is this an incoming or outgoing dep?
};
//----------------------------------------------------------------------------
/// Dependence Class - A representation of a simple (non-loop-related)
/// dependence.
///
class Dependence {
DepGraphNode* toOrFromNode;
unsigned char depType;
public:
Dependence(DepGraphNode* toOrFromN, DependenceType type, bool isIncoming)
: toOrFromNode(toOrFromN),
depType(type | (isIncoming? IncomingFlag : 0x0)) { }
Dependence(const Dependence& D) : toOrFromNode(D.toOrFromNode),
depType(D.depType) { }
bool operator==(const Dependence& D) const {
return toOrFromNode == D.toOrFromNode && depType == D.depType;
}
/// Get information about the type of dependence.
///
unsigned getDepType() const {
return depType;
}
/// Get source or sink depending on what type of node this is!
///
DepGraphNode* getSrc() {
assert(depType & IncomingFlag); return toOrFromNode;
}
const DepGraphNode* getSrc() const {
assert(depType & IncomingFlag); return toOrFromNode;
}
DepGraphNode* getSink() {
assert(! (depType & IncomingFlag)); return toOrFromNode;
}
const DepGraphNode* getSink() const {
assert(! (depType & IncomingFlag)); return toOrFromNode;
}
/// Debugging support methods
///
void print(std::ostream &O) const;
// Default constructor: Do not use directly except for graph builder code
//
Dependence() : toOrFromNode(NULL), depType(NoDependence) { }
};
#ifdef SUPPORTING_LOOP_DEPENDENCES
struct LoopDependence: public Dependence {
DependenceDirection dir;
int distance;
short level;
LoopInfo* enclosingLoop;
};
#endif
//----------------------------------------------------------------------------
/// DepGraphNode Class - A representation of a single node in a dependence
/// graph, corresponding to a single instruction.
///
class DepGraphNode {
Instruction* instr;
std::vector<Dependence> inDeps;
std::vector<Dependence> outDeps;
friend class DependenceGraph;
typedef std::vector<Dependence>:: iterator iterator;
typedef std::vector<Dependence>::const_iterator const_iterator;
iterator inDepBegin() { return inDeps.begin(); }
const_iterator inDepBegin() const { return inDeps.begin(); }
iterator inDepEnd() { return inDeps.end(); }
const_iterator inDepEnd() const { return inDeps.end(); }
iterator outDepBegin() { return outDeps.begin(); }
const_iterator outDepBegin() const { return outDeps.begin(); }
iterator outDepEnd() { return outDeps.end(); }
const_iterator outDepEnd() const { return outDeps.end(); }
public:
DepGraphNode(Instruction& I) : instr(&I) { }
Instruction& getInstr() { return *instr; }
const Instruction& getInstr() const { return *instr; }
/// Debugging support methods
///
void print(std::ostream &O) const;
};
//----------------------------------------------------------------------------
/// DependenceGraph Class - A representation of a dependence graph for a
/// procedure. The primary query operation here is to look up a DepGraphNode for
/// a particular instruction, and then use the in/out dependence iterators
/// for the node.
///
class DependenceGraph {
DependenceGraph(const DependenceGraph&); // DO NOT IMPLEMENT
void operator=(const DependenceGraph&); // DO NOT IMPLEMENT
typedef hash_map<Instruction*, DepGraphNode*> DepNodeMapType;
typedef DepNodeMapType:: iterator map_iterator;
typedef DepNodeMapType::const_iterator const_map_iterator;
DepNodeMapType depNodeMap;
inline DepGraphNode* getNodeInternal(Instruction& inst,
bool createIfMissing = false) {
map_iterator I = depNodeMap.find(&inst);
if (I == depNodeMap.end())
return (!createIfMissing)? NULL :
depNodeMap.insert(
std::make_pair(&inst, new DepGraphNode(inst))).first->second;
else
return I->second;
}
public:
typedef std::vector<Dependence>:: iterator iterator;
typedef std::vector<Dependence>::const_iterator const_iterator;
public:
DependenceGraph() { }
~DependenceGraph();
/// Get the graph node for an instruction. There will be one if and
/// only if there are any dependences incident on this instruction.
/// If there is none, these methods will return NULL.
///
DepGraphNode* getNode(Instruction& inst, bool createIfMissing = false) {
return getNodeInternal(inst, createIfMissing);
}
const DepGraphNode* getNode(const Instruction& inst) const {
return const_cast<DependenceGraph*>(this)
->getNodeInternal(const_cast<Instruction&>(inst));
}
iterator inDepBegin(DepGraphNode& T) {
return T.inDeps.begin();
}
const_iterator inDepBegin (const DepGraphNode& T) const {
return T.inDeps.begin();
}
iterator inDepEnd(DepGraphNode& T) {
return T.inDeps.end();
}
const_iterator inDepEnd(const DepGraphNode& T) const {
return T.inDeps.end();
}
iterator outDepBegin(DepGraphNode& F) {
return F.outDeps.begin();
}
const_iterator outDepBegin(const DepGraphNode& F) const {
return F.outDeps.begin();
}
iterator outDepEnd(DepGraphNode& F) {
return F.outDeps.end();
}
const_iterator outDepEnd(const DepGraphNode& F) const {
return F.outDeps.end();
}
/// Debugging support methods
///
void print(const Function& func, std::ostream &O) const;
public:
/// AddSimpleDependence - adding and modifying the dependence graph.
/// These should be used only by dependence analysis implementations.
///
void AddSimpleDependence(Instruction& fromI, Instruction& toI,
DependenceType depType) {
DepGraphNode* fromNode = getNodeInternal(fromI, /*create*/ true);
DepGraphNode* toNode = getNodeInternal(toI, /*create*/ true);
fromNode->outDeps.push_back(Dependence(toNode, depType, false));
toNode-> inDeps. push_back(Dependence(fromNode, depType, true));
}
#ifdef SUPPORTING_LOOP_DEPENDENCES
// This interface is a placeholder to show what information is needed.
// It will probably change when it starts being used.
void AddLoopDependence(Instruction& fromI,
Instruction& toI,
DependenceType depType,
DependenceDirection dir,
int distance,
short level,
LoopInfo* enclosingLoop);
#endif // SUPPORTING_LOOP_DEPENDENCES
};
} // End llvm namespace
#endif
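
The header comment above describes the standard query idiom: start from a
known node and enumerate the dependences attached to it. A small sketch of
that idiom, using only the interface declared in this header (the helper name
is invented for illustration):

#include "DependenceGraph.h"
#include <iostream>

namespace llvm {
static void printOutgoingDeps(DependenceGraph &DG, Instruction &I) {
  if (DepGraphNode *N = DG.getNode(I)) {          // NULL => no deps recorded
    for (DependenceGraph::iterator DI = DG.outDepBegin(*N),
                                   DE = DG.outDepEnd(*N); DI != DE; ++DI) {
      DI->print(std::cerr);                       // e.g. "TRUE dependence"
      std::cerr << "\n";
    }
  }
}
} // End llvm namespace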


@@ -1,445 +0,0 @@
//===- IPModRef.cpp - Compute IP Mod/Ref information ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// See high-level comments in IPModRef.h
//
//===----------------------------------------------------------------------===//
#include "IPModRef.h"
#include "llvm/Analysis/DataStructure/DataStructure.h"
#include "llvm/Analysis/DataStructure/DSGraph.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include <vector>
namespace llvm {
//----------------------------------------------------------------------------
// Private constants and data
//----------------------------------------------------------------------------
static RegisterAnalysis<IPModRef>
Z("ipmodref", "Interprocedural mod/ref analysis");
//----------------------------------------------------------------------------
// class ModRefInfo
//----------------------------------------------------------------------------
void ModRefInfo::print(std::ostream &O,
const std::string& sprefix) const
{
O << sprefix << "Modified nodes = " << modNodeSet;
O << sprefix << "Referenced nodes = " << refNodeSet;
}
void ModRefInfo::dump() const
{
print(std::cerr);
}
//----------------------------------------------------------------------------
// class FunctionModRefInfo
//----------------------------------------------------------------------------
// This constructor computes a node numbering for the TD graph.
//
FunctionModRefInfo::FunctionModRefInfo(const Function& func,
IPModRef& ipmro,
DSGraph* tdgClone)
: F(func), IPModRefObj(ipmro),
funcTDGraph(tdgClone),
funcModRefInfo(tdgClone->getGraphSize())
{
unsigned i = 0;
for (DSGraph::node_iterator NI = funcTDGraph->node_begin(),
E = funcTDGraph->node_end(); NI != E; ++NI)
NodeIds[*NI] = i++;
}
FunctionModRefInfo::~FunctionModRefInfo()
{
for(std::map<const Instruction*, ModRefInfo*>::iterator
I=callSiteModRefInfo.begin(), E=callSiteModRefInfo.end(); I != E; ++I)
delete(I->second);
// Empty map just to make problems easier to track down
callSiteModRefInfo.clear();
delete funcTDGraph;
}
unsigned FunctionModRefInfo::getNodeId(const Value* value) const {
return getNodeId(funcTDGraph->getNodeForValue(const_cast<Value*>(value))
.getNode());
}
// Compute Mod/Ref bit vectors for the entire function.
// These are simply copies of the Read/Write flags from the nodes of
// the top-down DS graph.
//
void FunctionModRefInfo::computeModRef(const Function &func)
{
// Mark all nodes in the graph that are marked MOD as being mod
// and all those marked REF as being ref.
unsigned i = 0;
for (DSGraph::node_iterator NI = funcTDGraph->node_begin(),
E = funcTDGraph->node_end(); NI != E; ++NI, ++i) {
if ((*NI)->isModified()) funcModRefInfo.setNodeIsMod(i);
if ((*NI)->isRead()) funcModRefInfo.setNodeIsRef(i);
}
// Compute the Mod/Ref info for all call sites within the function.
// The call sites are recorded in the TD graph.
const std::vector<DSCallSite>& callSites = funcTDGraph->getFunctionCalls();
for (unsigned i = 0, N = callSites.size(); i < N; ++i)
computeModRef(callSites[i].getCallSite());
}
// ResolveCallSiteModRefInfo - This method performs the following actions:
//
// 1. It clones the top-down graph for the current function
// 2. It clears all of the mod/ref bits in the cloned graph
// 3. It then merges the bottom-up graph(s) for the specified call-site into
// the clone (bringing new mod/ref bits).
// 4. It returns the clone, and a mapping of nodes from the original TDGraph to
// the cloned graph with Mod/Ref info for the callsite.
//
// NOTE: Because this clones a dsgraph and returns it, the caller is responsible
// for deleting the returned graph!
// NOTE: This method may return a null pointer if it is unable to determine the
// requested information (because the call site calls an external
// function or we cannot determine the complete set of functions invoked).
//
DSGraph* FunctionModRefInfo::ResolveCallSiteModRefInfo(CallSite CS,
hash_map<const DSNode*, DSNodeHandle> &NodeMap)
{
// Step #0: Quick check if we are going to fail anyway: avoid
// all the graph cloning and map copying in steps #1 and #2.
//
if (const Function *F = CS.getCalledFunction()) {
if (F->isExternal())
return 0; // We cannot compute Mod/Ref info for this callsite...
} else {
// Eventually, should check here if any callee is external.
// For now we are not handling this case anyway.
std::cerr << "IP Mod/Ref indirect call not implemented yet: "
<< "Being conservative\n";
return 0; // We cannot compute Mod/Ref info for this callsite...
}
// Step #1: Clone the top-down graph...
DSGraph *Result = new DSGraph(*funcTDGraph, NodeMap);
// Step #2: Clear Mod/Ref information...
Result->maskNodeTypes(~(DSNode::Modified | DSNode::Read));
// Step #3: clone the bottom up graphs for the callees into the caller graph
if (Function *F = CS.getCalledFunction())
{
assert(!F->isExternal());
// Build up a DSCallSite for our invocation point here...
// If the call returns a value, make sure to merge the nodes...
DSNodeHandle RetVal;
if (DS::isPointerType(CS.getInstruction()->getType()))
RetVal = Result->getNodeForValue(CS.getInstruction());
// Populate the arguments list...
std::vector<DSNodeHandle> Args;
for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
I != E; ++I)
if (DS::isPointerType((*I)->getType()))
Args.push_back(Result->getNodeForValue(*I));
// Build the call site...
DSCallSite NCS(CS, RetVal, F, Args);
// Perform the merging now of the graph for the callee, which will
// come with mod/ref bits set...
Result->mergeInGraph(NCS, *F, IPModRefObj.getBUDSGraph(*F),
DSGraph::StripAllocaBit
| DSGraph::DontCloneCallNodes
| DSGraph::DontCloneAuxCallNodes);
}
else
assert(0 && "See error message");
// Remove dead nodes aggressively to match the caller's original graph.
Result->removeDeadNodes(DSGraph::KeepUnreachableGlobals);
// Step #4: Return the clone + the mapping (by ref)
return Result;
}
// Compute Mod/Ref bit vectors for a single call site.
// These are copies of the Read/Write flags from the nodes of
// the graph produced by clearing all flags in the caller's TD graph
// and then inlining the callee's BU graph into the caller's TD graph.
//
void
FunctionModRefInfo::computeModRef(CallSite CS)
{
// Allocate the mod/ref info for the call site. Bits automatically cleared.
ModRefInfo* callModRefInfo = new ModRefInfo(funcTDGraph->getGraphSize());
callSiteModRefInfo[CS.getInstruction()] = callModRefInfo;
// Get a copy of the graph for the callee with the callee inlined
hash_map<const DSNode*, DSNodeHandle> NodeMap;
DSGraph* csgp = ResolveCallSiteModRefInfo(CS, NodeMap);
if (!csgp)
{ // Callee's side effects are unknown: mark all nodes Mod and Ref.
// Eventually this should only mark nodes visible to the callee, i.e.,
// exclude stack variables not reachable from any outgoing argument
// or any global.
callModRefInfo->getModSet().set();
callModRefInfo->getRefSet().set();
return;
}
// For all nodes in the graph, extract the mod/ref information
for (DSGraph::node_iterator NI = funcTDGraph->node_begin(),
E = funcTDGraph->node_end(); NI != E; ++NI) {
DSNode* csgNode = NodeMap[*NI].getNode();
assert(csgNode && "Inlined and original graphs do not correspond!");
if (csgNode->isModified())
callModRefInfo->setNodeIsMod(getNodeId(*NI));
if (csgNode->isRead())
callModRefInfo->setNodeIsRef(getNodeId(*NI));
}
// Drop nodemap before we delete the graph...
NodeMap.clear();
delete csgp;
}
class DSGraphPrintHelper {
const DSGraph& tdGraph;
std::vector<std::vector<const Value*> > knownValues; // identifiable objects
public:
/*ctor*/ DSGraphPrintHelper(const FunctionModRefInfo& fmrInfo)
: tdGraph(fmrInfo.getFuncGraph())
{
knownValues.resize(tdGraph.getGraphSize());
// For every identifiable value, save Value pointer in knownValues[i]
for (hash_map<Value*, DSNodeHandle>::const_iterator
I = tdGraph.getScalarMap().begin(),
E = tdGraph.getScalarMap().end(); I != E; ++I)
if (isa<GlobalValue>(I->first) ||
isa<Argument>(I->first) ||
isa<LoadInst>(I->first) ||
isa<AllocaInst>(I->first) ||
isa<MallocInst>(I->first))
{
unsigned nodeId = fmrInfo.getNodeId(I->second.getNode());
knownValues[nodeId].push_back(I->first);
}
}
void printValuesInBitVec(std::ostream &O, const BitSetVector& bv) const
{
assert(bv.size() == knownValues.size());
if (bv.none())
{ // No bits are set: just say so and return
O << "\tNONE.\n";
return;
}
if (bv.all())
{ // All bits are set: just say so and return
O << "\tALL GRAPH NODES.\n";
return;
}
for (unsigned i=0, N=bv.size(); i < N; ++i)
if (bv.test(i))
{
O << "\tNode# " << i << " : ";
if (! knownValues[i].empty())
for (unsigned j=0, NV=knownValues[i].size(); j < NV; j++)
{
const Value* V = knownValues[i][j];
if (isa<GlobalValue>(V)) O << "(Global) ";
else if (isa<Argument>(V)) O << "(Target of FormalParm) ";
else if (isa<LoadInst>(V)) O << "(Target of LoadInst ) ";
else if (isa<AllocaInst>(V)) O << "(Target of AllocaInst) ";
else if (isa<MallocInst>(V)) O << "(Target of MallocInst) ";
if (V->hasName()) O << V->getName();
else if (isa<Instruction>(V)) O << *V;
else O << "(Value*) 0x" << (void*) V;
O << std::string((j < NV-1)? "; " : "\n");
}
#if 0
else
tdGraph.getNodes()[i]->print(O, /*graph*/ NULL);
#endif
}
}
};
// Print the results of the pass.
// Currently this just prints bit-vectors and is not very readable.
//
void FunctionModRefInfo::print(std::ostream &O) const
{
DSGraphPrintHelper DPH(*this);
O << "========== Mod/ref information for function "
<< F.getName() << "========== \n\n";
// First: Print Globals and Locals modified anywhere in the function.
//
O << " -----Mod/Ref in the body of function " << F.getName()<< ":\n";
O << " --Objects modified in the function body:\n";
DPH.printValuesInBitVec(O, funcModRefInfo.getModSet());
O << " --Objects referenced in the function body:\n";
DPH.printValuesInBitVec(O, funcModRefInfo.getRefSet());
O << " --Mod and Ref vectors for the nodes listed above:\n";
funcModRefInfo.print(O, "\t");
O << "\n";
// Second: Print Globals and Locals modified at each call site in function
//
for (std::map<const Instruction *, ModRefInfo*>::const_iterator
CI = callSiteModRefInfo.begin(), CE = callSiteModRefInfo.end();
CI != CE; ++CI)
{
O << " ----Mod/Ref information for call site\n" << *CI->first;
O << " --Objects modified at call site:\n";
DPH.printValuesInBitVec(O, CI->second->getModSet());
O << " --Objects referenced at call site:\n";
DPH.printValuesInBitVec(O, CI->second->getRefSet());
O << " --Mod and Ref vectors for the nodes listed above:\n";
CI->second->print(O, "\t");
O << "\n";
}
O << "\n";
}
void FunctionModRefInfo::dump() const
{
print(std::cerr);
}
//----------------------------------------------------------------------------
// class IPModRef: An interprocedural pass that computes IP Mod/Ref info.
//----------------------------------------------------------------------------
// Free the FunctionModRefInfo objects cached in funcToModRefInfoMap.
//
void IPModRef::releaseMemory()
{
for(std::map<const Function*, FunctionModRefInfo*>::iterator
I=funcToModRefInfoMap.begin(), E=funcToModRefInfoMap.end(); I != E; ++I)
delete(I->second);
// Clear map so memory is not re-released if we are called again
funcToModRefInfoMap.clear();
}
// Run the "interprocedural" pass on each function. This needs to do
// NO real interprocedural work because all that is needed is done by the
// data structure analysis.
//
bool IPModRef::runOnModule(Module &theModule)
{
M = &theModule;
for (Module::const_iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI)
if (! FI->isExternal())
getFuncInfo(*FI, /*computeIfMissing*/ true);
return true;
}
FunctionModRefInfo& IPModRef::getFuncInfo(const Function& func,
bool computeIfMissing)
{
FunctionModRefInfo*& funcInfo = funcToModRefInfoMap[&func];
assert (funcInfo != NULL || computeIfMissing);
if (funcInfo == NULL)
{ // Create a new FunctionModRefInfo object.
// Clone the top-down graph and remove any dead nodes first, because
// otherwise original and merged graphs will not match.
// The memory for this graph clone will be freed by FunctionModRefInfo.
DSGraph* funcTDGraph =
new DSGraph(getAnalysis<TDDataStructures>().getDSGraph(func));
funcTDGraph->removeDeadNodes(DSGraph::KeepUnreachableGlobals);
funcInfo = new FunctionModRefInfo(func, *this, funcTDGraph); //auto-insert
funcInfo->computeModRef(func); // computes the mod/ref info
}
return *funcInfo;
}
/// getBUDSGraph - This method returns the BU data structure graph for F through
/// the use of the BUDataStructures object.
///
const DSGraph &IPModRef::getBUDSGraph(const Function &F) {
return getAnalysis<BUDataStructures>().getDSGraph(F);
}
// getAnalysisUsage - This pass requires top-down data structure graphs.
// It modifies nothing.
//
void IPModRef::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<LocalDataStructures>();
AU.addRequired<BUDataStructures>();
AU.addRequired<TDDataStructures>();
}
void IPModRef::print(std::ostream &O, const Module*) const
{
O << "\nRESULTS OF INTERPROCEDURAL MOD/REF ANALYSIS:\n\n";
for (std::map<const Function*, FunctionModRefInfo*>::const_iterator
mapI = funcToModRefInfoMap.begin(), mapE = funcToModRefInfoMap.end();
mapI != mapE; ++mapI)
mapI->second->print(O);
O << "\n";
}
void IPModRef::dump() const
{
print(std::cerr);
}
} // End llvm namespace


@@ -1,232 +0,0 @@
//===- IPModRef.h - Compute IP Mod/Ref information --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// class IPModRef:
//
// class IPModRef is an interprocedural analysis pass that computes
// flow-insensitive IP Mod and Ref information for every function
// (the GMOD and GREF problems) and for every call site (MOD and REF).
//
// In practice, this needs to do NO real interprocedural work because
// all that is needed is done by the data structure analysis.
// This uses the top-down DS graph for a function and the bottom-up DS graph
// for each callee (including the Mod/Ref flags in the bottom-up graph)
// to compute the set of nodes that are Mod and Ref for the function and
// for each of its call sites.
//
//
// class FunctionModRefInfo:
//
// The results of IPModRef are encapsulated in the class FunctionModRefInfo.
// The results are stored as bit vectors: bit i represents node i
// in the TD DSGraph for the current function. (This node numbering is
// implemented by class FunctionModRefInfo.) Each FunctionModRefInfo
// includes:
// -- 2 bit vectors for the function (GMOD and GREF), and
// -- 2 bit vectors for each call site (MOD and REF).
//
//
// IPModRef vs. Alias Analysis for Clients:
//
// The IPModRef pass does not provide simpler query interfaces for specific
// LLVM values, instructions, or pointers because those results should be
// obtained through alias analysis (e.g., class DSAliasAnalysis).
// class IPModRef is primarily meant for other analysis passes that need to
// use Mod/Ref information efficiently for more complicated purposes;
// the bit-vector representations make propagation very efficient.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_IPMODREF_H
#define LLVM_ANALYSIS_IPMODREF_H
#include "llvm/Pass.h"
#include "llvm/ADT/BitSetVector.h"
#include "llvm/ADT/hash_map"
namespace llvm {
class Module;
class Function;
class CallSite;
class Instruction;
class CallInst;
class InvokeInst;
class DSNode;
class DSGraph;
class DSNodeHandle;
class ModRefInfo; // Result of IP Mod/Ref for one entity
class FunctionModRefInfo; // ModRefInfo for a func and all calls in it
class IPModRef; // Pass that computes IP Mod/Ref info
//----------------------------------------------------------------------------
/// ModRefInfo Class - Representation of Mod/Ref information for a single
/// function or callsite. This is represented as a pair of bit vectors, one each
/// for Mod and Ref. Each bit vector is indexed by the id of the corresponding
/// DS graph node.
///
class ModRefInfo {
BitSetVector modNodeSet; // set of modified nodes
BitSetVector refNodeSet; // set of referenced nodes
public:
// Methods to construct ModRefInfo objects.
ModRefInfo(unsigned int numNodes)
: modNodeSet(numNodes),
refNodeSet(numNodes) { }
unsigned getSize() const {
assert(modNodeSet.size() == refNodeSet.size() &&
"Mod & Ref different size?");
return modNodeSet.size();
}
void setNodeIsMod (unsigned nodeId) { modNodeSet[nodeId] = true; }
void setNodeIsRef (unsigned nodeId) { refNodeSet[nodeId] = true; }
// Methods to query the mod/ref info
bool nodeIsMod (unsigned nodeId) const { return modNodeSet.test(nodeId); }
bool nodeIsRef (unsigned nodeId) const { return refNodeSet.test(nodeId); }
bool nodeIsKill(unsigned nodeId) const { return false; }
const BitSetVector& getModSet() const { return modNodeSet; }
BitSetVector& getModSet() { return modNodeSet; }
const BitSetVector& getRefSet() const { return refNodeSet; }
BitSetVector& getRefSet() { return refNodeSet; }
// Debugging support methods
void print(std::ostream &O, const std::string& prefix=std::string("")) const;
void dump() const;
};
//----------------------------------------------------------------------------
/// FunctionModRefInfo Class - Representation of the results of IP Mod/Ref
/// analysis for a function and for each of the call sites within the function.
/// Each of these is represented as bit vectors of size equal to the number of
/// nodes in the top-down DS graph of the function. Nodes are identified by their
/// nodeId, in the range [0 .. funcTDGraph.size()-1].
///
class FunctionModRefInfo {
const Function& F; // The function
IPModRef& IPModRefObj; // The IPModRef Object owning this
DSGraph* funcTDGraph; // Top-down DS graph for function
ModRefInfo funcModRefInfo; // ModRefInfo for the function body
std::map<const Instruction*, ModRefInfo*>
callSiteModRefInfo; // ModRefInfo for each callsite
std::map<const DSNode*, unsigned> NodeIds;
friend class IPModRef;
void computeModRef(const Function &func);
void computeModRef(CallSite call);
DSGraph*
ResolveCallSiteModRefInfo(CallSite CS,
hash_map<const DSNode*, DSNodeHandle> &NodeMap);
public:
FunctionModRefInfo(const Function& func, IPModRef &IPModRefObj,
DSGraph* tdgClone);
~FunctionModRefInfo();
// Identify the function and its relevant DS graph
//
const Function& getFunction() const { return F; }
const DSGraph& getFuncGraph() const { return *funcTDGraph; }
// Retrieve Mod/Ref results for a single call site and for the function body
//
const ModRefInfo* getModRefInfo(const Function& func) const {
return &funcModRefInfo;
}
const ModRefInfo* getModRefInfo(const CallInst& callInst) const {
std::map<const Instruction*, ModRefInfo*>::const_iterator I =
callSiteModRefInfo.find((Instruction*)&callInst);
return (I == callSiteModRefInfo.end()) ? NULL : I->second;
}
const ModRefInfo* getModRefInfo(const InvokeInst& II) const {
std::map<const Instruction*, ModRefInfo*>::const_iterator I =
callSiteModRefInfo.find((Instruction*)&II);
return (I == callSiteModRefInfo.end()) ? NULL : I->second;
}
// Get the nodeIds used to index all Mod/Ref information for current function
//
unsigned getNodeId(const DSNode* node) const {
std::map<const DSNode*, unsigned>::const_iterator iter = NodeIds.find(node);
assert(iter != NodeIds.end() && iter->second < funcModRefInfo.getSize());
return iter->second;
}
unsigned getNodeId(const Value* value) const;
// Debugging support methods
void print(std::ostream &O) const;
void dump() const;
};
//----------------------------------------------------------------------------
/// IPModRef Class - An interprocedural pass that computes IP Mod/Ref info for
/// functions and for individual call sites.
///
/// Given the DSGraph of a function, this class can be queried for
/// a ModRefInfo object describing all the nodes in the DSGraph that are
/// (a) modified, and (b) referenced during an execution of the function
/// from an arbitrary callsite, or during an execution of a single call-site
/// within the function.
///
class IPModRef : public ModulePass {
std::map<const Function*, FunctionModRefInfo*> funcToModRefInfoMap;
Module* M;
FunctionModRefInfo& getFuncInfo(const Function& func,
bool computeIfMissing = false);
public:
IPModRef() : M(NULL) {}
~IPModRef() {}
/// runOnModule - Driver function to run IP Mod/Ref on a Module.
/// This initializes the module reference, and then computes IPModRef
/// results immediately if demand-driven analysis was *not* specified.
///
virtual bool runOnModule(Module &M);
/// getFunctionModRefInfo - Retrieve the Mod/Ref information for a single
/// function
///
const FunctionModRefInfo& getFunctionModRefInfo(const Function& func) {
return getFuncInfo(func);
}
/// getBUDSGraph - This method returns the BU data structure graph for F
/// through the use of the BUDataStructures object.
///
const DSGraph &getBUDSGraph(const Function &F);
// Debugging support methods
//
void print(std::ostream &O, const Module* = 0) const;
void dump() const;
/// releaseMemory - Release memory held by this pass when the pass pipeline is
/// done
///
virtual void releaseMemory();
/// getAnalysisUsage - This pass requires top-down data structure graphs.
/// It modifies nothing.
///
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
};
} // End llvm namespace
#endif
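
A sketch (not from the removed sources) of how a client would consult the
per-call-site bit vectors described above; the names follow this header, but
the helper itself is hypothetical.

#include "IPModRef.h"
#include <iostream>

namespace llvm {
static void reportCallSiteModRef(IPModRef &IPMR, const Function &F,
                                 const CallInst &CI) {
  const FunctionModRefInfo &FMRI = IPMR.getFunctionModRefInfo(F);
  if (const ModRefInfo *MRI = FMRI.getModRefInfo(CI)) {
    for (unsigned id = 0, N = MRI->getSize(); id != N; ++id)
      if (MRI->nodeIsMod(id) || MRI->nodeIsRef(id))
        std::cerr << "node " << id << (MRI->nodeIsMod(id) ? " MOD" : "")
                  << (MRI->nodeIsRef(id) ? " REF" : "") << "\n";
  } else {
    std::cerr << "no mod/ref info recorded for this call site\n";
  }
}
} // End llvm namespace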


@@ -1,502 +0,0 @@
//===- MemoryDepAnalysis.cpp - Compute dep graph for memory ops -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass (MemoryDepAnalysis) that computes memory-based
// data dependences between instructions for each function in a module.
// Memory-based dependences occur due to load and store operations, but
// also the side-effects of call instructions.
//
// The result of this pass is a DependenceGraph for each function
// representing the memory-based data dependences between instructions.
//
//===----------------------------------------------------------------------===//
#include "MemoryDepAnalysis.h"
#include "IPModRef.h"
#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "llvm/Analysis/DataStructure/DataStructure.h"
#include "llvm/Analysis/DataStructure/DSGraph.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/CFG.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/hash_map"
#include "llvm/ADT/hash_set"
namespace llvm {
///--------------------------------------------------------------------------
/// struct ModRefTable:
///
/// A data structure that tracks ModRefInfo for instructions:
/// -- modRefMap is a map of Instruction* -> ModRefInfo for the instr.
/// -- definers is a vector of instructions that define any node
/// -- users is a vector of instructions that reference any node
/// -- numUsersBeforeDef is a vector indicating that the number of users
/// seen before definers[i] is numUsersBeforeDef[i].
///
/// numUsersBeforeDef[] effectively tells us the exact interleaving of
/// definers and users within the ModRefTable.
/// This is only maintained when constructing the table for one SCC, and
/// not copied over from one table to another since it is no longer useful.
///--------------------------------------------------------------------------
class ModRefTable {
public:
typedef hash_map<Instruction*, ModRefInfo> ModRefMap;
typedef ModRefMap::const_iterator const_map_iterator;
typedef ModRefMap:: iterator map_iterator;
typedef std::vector<Instruction*>::const_iterator const_ref_iterator;
typedef std::vector<Instruction*>:: iterator ref_iterator;
ModRefMap modRefMap;
std::vector<Instruction*> definers;
std::vector<Instruction*> users;
std::vector<unsigned> numUsersBeforeDef;
// Iterators to enumerate all the defining instructions
const_ref_iterator defsBegin() const { return definers.begin(); }
ref_iterator defsBegin() { return definers.begin(); }
const_ref_iterator defsEnd() const { return definers.end(); }
ref_iterator defsEnd() { return definers.end(); }
// Iterators to enumerate all the user instructions
const_ref_iterator usersBegin() const { return users.begin(); }
ref_iterator usersBegin() { return users.begin(); }
const_ref_iterator usersEnd() const { return users.end(); }
ref_iterator usersEnd() { return users.end(); }
// Iterator identifying the last user that was seen *before* a
// specified def. In particular, all users in the half-closed range
// [ usersBegin(), usersBeforeDef_End(defPtr) )
// were seen *before* the specified def. All users in the half-closed range
// [ usersBeforeDef_End(defPtr), usersEnd() )
// were seen *after* the specified def.
//
ref_iterator usersBeforeDef_End(const_ref_iterator defPtr) {
unsigned defIndex = (unsigned) (defPtr - defsBegin());
assert(defIndex < numUsersBeforeDef.size());
assert(usersBegin() + numUsersBeforeDef[defIndex] <= usersEnd());
return usersBegin() + numUsersBeforeDef[defIndex];
}
const_ref_iterator usersBeforeDef_End(const_ref_iterator defPtr) const {
return const_cast<ModRefTable*>(this)->usersBeforeDef_End(defPtr);
}
//
// Modifier methods
//
void AddDef(Instruction* D) {
definers.push_back(D);
numUsersBeforeDef.push_back(users.size());
}
void AddUse(Instruction* U) {
users.push_back(U);
}
void Insert(const ModRefTable& fromTable) {
modRefMap.insert(fromTable.modRefMap.begin(), fromTable.modRefMap.end());
definers.insert(definers.end(),
fromTable.definers.begin(), fromTable.definers.end());
users.insert(users.end(),
fromTable.users.begin(), fromTable.users.end());
numUsersBeforeDef.clear(); /* fromTable.numUsersBeforeDef is ignored */
}
};
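// Added illustration (not in the original file): the exact interleaving that
// numUsersBeforeDef[] records, using only the modifier methods above. The
// function and its Instruction* arguments are hypothetical.
static void modRefTableInterleavingExample(Instruction *U1, Instruction *U2,
                                           Instruction *U3, Instruction *D1,
                                           Instruction *D2) {
  ModRefTable T;
  T.AddUse(U1);
  T.AddUse(U2);      // two users seen before any def
  T.AddDef(D1);      // numUsersBeforeDef == [2]
  T.AddUse(U3);
  T.AddDef(D2);      // numUsersBeforeDef == [2, 3]
  // Users in [T.usersBegin(), T.usersBeforeDef_End(T.defsBegin())) are exactly
  // U1 and U2: the users seen *before* D1.
}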
///--------------------------------------------------------------------------
/// class ModRefInfoBuilder:
///
/// A simple InstVisitor<> class that retrieves the Mod/Ref info for
/// Load/Store/Call instructions and inserts this information in
/// a ModRefTable. It also records all instructions that Mod any node
/// and all that use any node.
///--------------------------------------------------------------------------
class ModRefInfoBuilder : public InstVisitor<ModRefInfoBuilder> {
const DSGraph& funcGraph;
const FunctionModRefInfo& funcModRef;
class ModRefTable& modRefTable;
ModRefInfoBuilder(); // DO NOT IMPLEMENT
ModRefInfoBuilder(const ModRefInfoBuilder&); // DO NOT IMPLEMENT
void operator=(const ModRefInfoBuilder&); // DO NOT IMPLEMENT
public:
ModRefInfoBuilder(const DSGraph& _funcGraph,
const FunctionModRefInfo& _funcModRef,
ModRefTable& _modRefTable)
: funcGraph(_funcGraph), funcModRef(_funcModRef), modRefTable(_modRefTable)
{
}
// At a call instruction, retrieve the ModRefInfo using IPModRef results.
// Add the call to the defs list if it modifies any nodes and to the uses
// list if it refs any nodes.
//
void visitCallInst(CallInst& callInst) {
ModRefInfo safeModRef(funcGraph.getGraphSize());
const ModRefInfo* callModRef = funcModRef.getModRefInfo(callInst);
if (callModRef == NULL) {
// call to external/unknown function: mark all nodes as Mod and Ref
safeModRef.getModSet().set();
safeModRef.getRefSet().set();
callModRef = &safeModRef;
}
modRefTable.modRefMap.insert(std::make_pair(&callInst,
ModRefInfo(*callModRef)));
if (callModRef->getModSet().any())
modRefTable.AddDef(&callInst);
if (callModRef->getRefSet().any())
modRefTable.AddUse(&callInst);
}
// At a store instruction, add to the mod set the single node pointed to
// by the pointer argument of the store. Interestingly, if there is no
// such node, that would be a null pointer reference!
void visitStoreInst(StoreInst& storeInst) {
const DSNodeHandle& ptrNode =
funcGraph.getNodeForValue(storeInst.getPointerOperand());
if (const DSNode* target = ptrNode.getNode()) {
unsigned nodeId = funcModRef.getNodeId(target);
ModRefInfo& minfo =
modRefTable.modRefMap.insert(
std::make_pair(&storeInst,
ModRefInfo(funcGraph.getGraphSize()))).first->second;
minfo.setNodeIsMod(nodeId);
modRefTable.AddDef(&storeInst);
} else
std::cerr << "Warning: Uninitialized pointer reference!\n";
}
// At a load instruction, add to the ref set the single node pointed to
// by the pointer argument of the load. Interestingly, if there is no
// such node, that would be a null pointer reference!
void visitLoadInst(LoadInst& loadInst) {
const DSNodeHandle& ptrNode =
funcGraph.getNodeForValue(loadInst.getPointerOperand());
if (const DSNode* target = ptrNode.getNode()) {
unsigned nodeId = funcModRef.getNodeId(target);
ModRefInfo& minfo =
modRefTable.modRefMap.insert(
std::make_pair(&loadInst,
ModRefInfo(funcGraph.getGraphSize()))).first->second;
minfo.setNodeIsRef(nodeId);
modRefTable.AddUse(&loadInst);
} else
std::cerr << "Warning: Uninitialized pointer reference!\n";
}
};
//----------------------------------------------------------------------------
// class MemoryDepAnalysis: A dep. graph for load/store/call instructions
//----------------------------------------------------------------------------
/// getAnalysisUsage - This does not modify anything. It uses the Top-Down DS
/// Graph and IPModRef.
///
void MemoryDepAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<TDDataStructures>();
AU.addRequired<IPModRef>();
}
/// Basic dependence gathering algorithm, using scc_iterator on CFG:
///
/// for every SCC S in the CFG in PostOrder on the SCC DAG
/// {
/// for every basic block BB in S in *postorder*
/// for every instruction I in BB in reverse
/// Add (I, ModRef[I]) to ModRefCurrent
/// if (Mod[I] != NULL)
/// Add I to DefSetCurrent: { I \in S : Mod[I] != NULL }
/// if (Ref[I] != NULL)
/// Add I to UseSetCurrent: { I : Ref[I] != NULL }
///
/// for every def D in DefSetCurrent
///
/// // NOTE: D comes after itself iff S contains a loop
/// if (HasLoop(S))
/// Add output-dep: D -> D
///
/// for every def D2 *after* D in DefSetCurrent
/// // NOTE: D2 comes before D in execution order
/// if (D & D2)
/// Add output-dep: D2 -> D
/// if (HasLoop(S))
/// Add output-dep: D -> D2
///
/// for every use U in UseSetCurrent that was seen *before* D
/// // NOTE: U comes after D in execution order
/// if (U & D)
/// if (U != D || HasLoop(S))
/// Add true-dep: D -> U
/// if (HasLoop(S))
/// Add anti-dep: U -> D
///
/// for every use U in UseSetCurrent that was seen *after* D
/// // NOTE: U comes before D in execution order
/// if (U & D)
/// if (U != D || HasLoop(S))
/// Add anti-dep: U -> D
/// if (HasLoop(S))
/// Add true-dep: D -> U
///
/// for every def Dnext in DefSetAfter
/// // NOTE: Dnext comes after D in execution order
/// if (Dnext & D)
/// Add output-dep: D -> Dnext
///
/// for every use Unext in UseSetAfter
/// // NOTE: Unext comes after D in execution order
/// if (Unext & D)
/// Add true-dep: D -> Unext
///
/// for every use U in UseSetCurrent
/// for every def Dnext in DefSetAfter
/// // NOTE: Dnext comes after U in execution order
/// if (Dnext & U)
/// Add anti-dep: U -> Dnext
///
/// Add ModRefCurrent to ModRefAfter: { (I, ModRef[I] ) }
/// Add DefSetCurrent to DefSetAfter: { I : Mod[I] != NULL }
/// Add UseSetCurrent to UseSetAfter: { I : Ref[I] != NULL }
/// }
///
///
void MemoryDepAnalysis::ProcessSCC(std::vector<BasicBlock*> &S,
ModRefTable& ModRefAfter, bool hasLoop) {
ModRefTable ModRefCurrent;
ModRefTable::ModRefMap& mapCurrent = ModRefCurrent.modRefMap;
ModRefTable::ModRefMap& mapAfter = ModRefAfter.modRefMap;
// Builder class fills out a ModRefTable one instruction at a time.
// To use it, we just invoke its visit function for each basic block:
//
// for each basic block BB in the SCC in *postorder*
// for each instruction I in BB in *reverse*
// ModRefInfoBuilder::visit(I)
// : Add (I, ModRef[I]) to ModRefCurrent.modRefMap
// : Add I to ModRefCurrent.definers if it defines any node
// : Add I to ModRefCurrent.users if it uses any node
//
ModRefInfoBuilder builder(*funcGraph, *funcModRef, ModRefCurrent);
for (std::vector<BasicBlock*>::iterator BI = S.begin(), BE = S.end();
BI != BE; ++BI)
// Note: BBs in the SCC<> created by scc_iterator are in postorder.
for (BasicBlock::reverse_iterator II=(*BI)->rbegin(), IE=(*BI)->rend();
II != IE; ++II)
builder.visit(*II);
/// for every def D in DefSetCurrent
///
for (ModRefTable::ref_iterator II=ModRefCurrent.defsBegin(),
IE=ModRefCurrent.defsEnd(); II != IE; ++II)
{
/// // NOTE: D comes after itself iff S contains a loop
/// if (HasLoop(S))
/// Add output-dep: D -> D
if (hasLoop)
funcDepGraph->AddSimpleDependence(**II, **II, OutputDependence);
/// for every def D2 *after* D in DefSetCurrent
/// // NOTE: D2 comes before D in execution order
/// if (D2 & D)
/// Add output-dep: D2 -> D
/// if (HasLoop(S))
/// Add output-dep: D -> D2
for (ModRefTable::ref_iterator JI=II+1; JI != IE; ++JI)
if (!Disjoint(mapCurrent.find(*II)->second.getModSet(),
mapCurrent.find(*JI)->second.getModSet()))
{
funcDepGraph->AddSimpleDependence(**JI, **II, OutputDependence);
if (hasLoop)
funcDepGraph->AddSimpleDependence(**II, **JI, OutputDependence);
}
/// for every use U in UseSetCurrent that was seen *before* D
/// // NOTE: U comes after D in execution order
/// if (U & D)
/// if (U != D || HasLoop(S))
/// Add true-dep: D -> U
/// if (HasLoop(S))
/// Add anti-dep: U -> D
{
ModRefTable::ref_iterator JI=ModRefCurrent.usersBegin();
ModRefTable::ref_iterator JE = ModRefCurrent.usersBeforeDef_End(II);
for ( ; JI != JE; ++JI)
if (!Disjoint(mapCurrent.find(*II)->second.getModSet(),
mapCurrent.find(*JI)->second.getRefSet()))
{
if (*II != *JI || hasLoop)
funcDepGraph->AddSimpleDependence(**II, **JI, TrueDependence);
if (hasLoop)
funcDepGraph->AddSimpleDependence(**JI, **II, AntiDependence);
}
/// for every use U in UseSetCurrent that was seen *after* D
/// // NOTE: U comes before D in execution order
/// if (U & D)
/// if (U != D || HasLoop(S))
/// Add anti-dep: U -> D
/// if (HasLoop(S))
/// Add true-dep: D -> U
for (/*continue JI*/ JE = ModRefCurrent.usersEnd(); JI != JE; ++JI)
if (!Disjoint(mapCurrent.find(*II)->second.getModSet(),
mapCurrent.find(*JI)->second.getRefSet()))
{
if (*II != *JI || hasLoop)
funcDepGraph->AddSimpleDependence(**JI, **II, AntiDependence);
if (hasLoop)
funcDepGraph->AddSimpleDependence(**II, **JI, TrueDependence);
}
}
/// for every def Dnext in DefSetAfter
/// // NOTE: Dnext comes after D in execution order
/// if (Dnext & D)
/// Add output-dep: D -> Dnext
for (ModRefTable::ref_iterator JI=ModRefAfter.defsBegin(),
JE=ModRefAfter.defsEnd(); JI != JE; ++JI)
if (!Disjoint(mapCurrent.find(*II)->second.getModSet(),
mapAfter.find(*JI)->second.getModSet()))
funcDepGraph->AddSimpleDependence(**II, **JI, OutputDependence);
/// for every use Unext in UseSetAfter
/// // NOTE: Unext comes after D in execution order
/// if (Unext & D)
/// Add true-dep: D -> Unext
for (ModRefTable::ref_iterator JI=ModRefAfter.usersBegin(),
JE=ModRefAfter.usersEnd(); JI != JE; ++JI)
if (!Disjoint(mapCurrent.find(*II)->second.getModSet(),
mapAfter.find(*JI)->second.getRefSet()))
funcDepGraph->AddSimpleDependence(**II, **JI, TrueDependence);
}
///
/// for every use U in UseSetCurrent
/// for every def Dnext in DefSetAfter
/// // NOTE: Dnext comes after U in execution order
/// if (Dnext & U)
/// Add anti-dep: U -> Dnext
for (ModRefTable::ref_iterator II=ModRefCurrent.usersBegin(),
IE=ModRefCurrent.usersEnd(); II != IE; ++II)
for (ModRefTable::ref_iterator JI=ModRefAfter.defsBegin(),
JE=ModRefAfter.defsEnd(); JI != JE; ++JI)
if (!Disjoint(mapCurrent.find(*II)->second.getRefSet(),
mapAfter.find(*JI)->second.getModSet()))
funcDepGraph->AddSimpleDependence(**II, **JI, AntiDependence);
/// Add ModRefCurrent to ModRefAfter: { (I, ModRef[I] ) }
/// Add DefSetCurrent to DefSetAfter: { I : Mod[I] != NULL }
/// Add UseSetCurrent to UseSetAfter: { I : Ref[I] != NULL }
ModRefAfter.Insert(ModRefCurrent);
}
/// Debugging support methods
///
void MemoryDepAnalysis::print(std::ostream &O, const Module*) const
{
// TEMPORARY LOOP
for (hash_map<Function*, DependenceGraph*>::const_iterator
I = funcMap.begin(), E = funcMap.end(); I != E; ++I)
{
Function* func = I->first;
DependenceGraph* depGraph = I->second;
O << "\n================================================================\n";
O << "DEPENDENCE GRAPH FOR MEMORY OPERATIONS IN FUNCTION " << func->getName();
O << "\n================================================================\n\n";
depGraph->print(*func, O);
}
}
///
/// Run the pass on a function
///
bool MemoryDepAnalysis::runOnFunction(Function &F) {
assert(!F.isExternal());
// Get the FunctionModRefInfo holding IPModRef results for this function.
// Use the TD graph recorded within the FunctionModRefInfo object, which
// may not be the same as the original TD graph computed by DS analysis.
//
funcModRef = &getAnalysis<IPModRef>().getFunctionModRefInfo(F);
funcGraph = &funcModRef->getFuncGraph();
// TEMPORARY: ptr to depGraph (later just becomes "this").
assert(!funcMap.count(&F) && "Analyzing function twice?");
funcDepGraph = funcMap[&F] = new DependenceGraph();
ModRefTable ModRefAfter;
for (scc_iterator<Function*> I = scc_begin(&F), E = scc_end(&F); I != E; ++I)
ProcessSCC(*I, ModRefAfter, I.hasLoop());
return true;
}
//-------------------------------------------------------------------------
// TEMPORARY FUNCTIONS TO MAKE THIS A MODULE PASS ---
// These functions will go away once this class becomes a FunctionPass.
//
// Driver function to compute dependence graphs for every function.
// This is temporary and will go away once this is a FunctionPass.
//
bool MemoryDepAnalysis::runOnModule(Module& M)
{
for (Module::iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI)
if (! FI->isExternal())
runOnFunction(*FI); // automatically inserts each depGraph into funcMap
return true;
}
// Release all the dependence graphs in the map.
void MemoryDepAnalysis::releaseMemory()
{
for (hash_map<Function*, DependenceGraph*>::const_iterator
I = funcMap.begin(), E = funcMap.end(); I != E; ++I)
delete I->second;
funcMap.clear();
// Clear pointers because the pass constructor will not be invoked again.
funcDepGraph = NULL;
funcGraph = NULL;
funcModRef = NULL;
}
MemoryDepAnalysis::~MemoryDepAnalysis()
{
releaseMemory();
}
//----END TEMPORARY FUNCTIONS----------------------------------------------
void MemoryDepAnalysis::dump() const
{
this->print(std::cerr);
}
static RegisterAnalysis<MemoryDepAnalysis>
Z("memdep", "Memory Dependence Analysis");
} // End llvm namespace


@@ -1,103 +0,0 @@
//===- MemoryDepAnalysis.h - Compute dep graph for memory ops ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides a pass (MemoryDepAnalysis) that computes memory-based
// data dependences between instructions for each function in a module.
// Memory-based dependences occur due to load and store operations, but
// also the side-effects of call instructions.
//
// The result of this pass is a DependenceGraph for each function
// representing the memory-based data dependences between instructions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_MEMORYDEPANALYSIS_H
#define LLVM_ANALYSIS_MEMORYDEPANALYSIS_H
#include "DependenceGraph.h"
#include "llvm/Pass.h"
#include "llvm/ADT/hash_map"
namespace llvm {
class ModRefTable;
class DSGraph;
class FunctionModRefInfo;
///---------------------------------------------------------------------------
/// class MemoryDepAnalysis:
/// Dependence analysis for load/store/call instructions using IPModRef info
/// computed at the granularity of individual DSGraph nodes.
///
/// This pass computes memory dependences for each function in a module.
/// It can be made a FunctionPass once a Pass (such as Parallelize) is
/// allowed to use a FunctionPass such as this one.
///---------------------------------------------------------------------------
class MemoryDepAnalysis : public ModulePass {
/// The following map and depGraph pointer are temporary until this class
/// becomes a FunctionPass instead of a module Pass.
hash_map<Function*, DependenceGraph*> funcMap;
DependenceGraph* funcDepGraph;
/// Information about one function being analyzed.
const DSGraph* funcGraph;
const FunctionModRefInfo* funcModRef;
/// Internal routine that processes each SCC of the CFG.
///
void ProcessSCC(std::vector<BasicBlock*> &SCC, ModRefTable& ModRefAfter,
bool HasLoop);
friend class PgmDependenceGraph;
public:
MemoryDepAnalysis() : funcDepGraph(0), funcGraph(0), funcModRef(0) {}
~MemoryDepAnalysis();
/// Driver function to compute dependence graphs for every function.
///
bool runOnModule(Module &M);
/// getGraph - Retrieve the dependence graph for a function.
/// This is temporary and will go away once this is a FunctionPass.
/// At that point, this class should directly inherit from DependenceGraph.
///
DependenceGraph& getGraph(Function& F) {
hash_map<Function*, DependenceGraph*>::iterator I = funcMap.find(&F);
assert(I != funcMap.end());
return *I->second;
}
const DependenceGraph& getGraph(Function& F) const {
hash_map<Function*, DependenceGraph*>::const_iterator I = funcMap.find(&F);
assert(I != funcMap.end());
return *I->second;
}
/// Release depGraphs held in the Function -> DepGraph map.
///
virtual void releaseMemory();
/// Driver functions to compute the Load/Store Dep. Graph per function.
///
bool runOnFunction(Function &F);
/// getAnalysisUsage - This does not modify anything. It uses the Top-Down DS
/// Graph and IPModRef.
void getAnalysisUsage(AnalysisUsage &AU) const;
/// Debugging support methods
///
void print(std::ostream &O, const Module* = 0) const;
void dump() const;
};
} // End llvm namespace
#endif
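
A hypothetical client pass (names invented, not from the removed sources)
showing how the per-function graphs built by MemoryDepAnalysis would be
consumed through the interface above.

#include "MemoryDepAnalysis.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include <iostream>

namespace llvm {
struct MemDepDumper : public ModulePass {
  virtual bool runOnModule(Module &M) {
    MemoryDepAnalysis &MDA = getAnalysis<MemoryDepAnalysis>();
    for (Module::iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI)
      if (!FI->isExternal())
        MDA.getGraph(*FI).print(*FI, std::cerr);  // memory deps for this func
    return false;                                 // analysis only, no changes
  }
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesAll();
    AU.addRequired<MemoryDepAnalysis>();
  }
};
} // End llvm namespace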


@@ -1,498 +0,0 @@
//===- Parallelize.cpp - Auto parallelization using DS Graphs -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass that automatically parallelizes a program,
// using the Cilk multi-threaded runtime system to execute parallel code.
//
// The pass uses the Program Dependence Graph (class PgmDependenceGraph) to
// identify parallelizable function calls, i.e., calls whose instances
// can be executed in parallel with instances of other function calls.
// (In the future, this should also execute different instances of the same
// function call in parallel, but that requires parallelizing across
// loop iterations.)
//
// The output of the pass is LLVM code with:
// (1) all parallelizable functions renamed to flag them as parallelizable;
// (2) calls to a sync() function introduced at synchronization points.
// The CWriter recognizes these functions and inserts the appropriate Cilk
// keywords when writing out C code. This C code must be compiled with cilk2c.
//
// Current algorithmic limitations:
// -- no array dependence analysis
// -- no parallelization for function calls in different loop iterations
// (except in unlikely trivial cases)
//
// Limitations of using Cilk:
// -- No parallelism within a function body, e.g., in a loop;
// -- Simplistic synchronization model requiring all parallel threads
// created within a function to block at a sync().
// -- Excessive overhead at "spawned" function calls, which has no benefit
// once all threads are busy (especially common when the degree of
// parallelism is low).
//
//===----------------------------------------------------------------------===//
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "PgmDependenceGraph.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/DataStructure/DataStructure.h"
#include "llvm/Analysis/DataStructure/DSGraph.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/hash_set"
#include "llvm/ADT/hash_map"
#include <functional>
#include <algorithm>
using namespace llvm;
//----------------------------------------------------------------------------
// Global constants used in marking Cilk functions and function calls.
//----------------------------------------------------------------------------
static const char * const CilkSuffix = ".llvm2cilk";
static const char * const DummySyncFuncName = "__sync.llvm2cilk";
//----------------------------------------------------------------------------
// Routines to identify Cilk functions, calls to Cilk functions, and syncs.
//----------------------------------------------------------------------------
static bool isCilk(const Function& F) {
return (F.getName().rfind(CilkSuffix) ==
F.getName().size() - std::strlen(CilkSuffix));
}
static bool isCilkMain(const Function& F) {
return F.getName() == "main" + std::string(CilkSuffix);
}
static bool isCilk(const CallInst& CI) {
return CI.getCalledFunction() && isCilk(*CI.getCalledFunction());
}
static bool isSync(const CallInst& CI) {
return CI.getCalledFunction() &&
CI.getCalledFunction()->getName() == DummySyncFuncName;
}
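// Editorial note (illustration only, not used by this pass): the predicates
// above reduce to a suffix test on the function name.  A minimal,
// self-contained equivalent using std::string alone would be:
//
//   static bool hasSuffix(const std::string &Name, const std::string &Suf) {
//     return Name.size() >= Suf.size() &&
//            Name.compare(Name.size() - Suf.size(), Suf.size(), Suf) == 0;
//   }
//   // hasSuffix("main.llvm2cilk", CilkSuffix) == true
//   // hasSuffix("main", CilkSuffix)           == false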
//----------------------------------------------------------------------------
// class Cilkifier
//
// Code generation pass that transforms code to identify where Cilk keywords
// should be inserted. This relies on `llvm-dis -c' to print out the keywords.
//----------------------------------------------------------------------------
class Cilkifier: public InstVisitor<Cilkifier> {
Function* DummySyncFunc;
// Data used when transforming each function.
hash_set<const Instruction*> stmtsVisited; // Flags for recursive DFS
hash_map<const CallInst*, hash_set<CallInst*> > spawnToSyncsMap;
// Input data for the transformation.
const hash_set<Function*>* cilkFunctions; // Set of parallel functions
PgmDependenceGraph* depGraph;
void DFSVisitInstr (Instruction* I,
Instruction* root,
hash_set<const Instruction*>& depsOfRoot);
public:
/*ctor*/ Cilkifier (Module& M);
// Transform a single function including its name, its call sites, and syncs
//
void TransformFunc (Function* F,
const hash_set<Function*>& cilkFunctions,
PgmDependenceGraph& _depGraph);
// The visitor function that does most of the hard work, via DFSVisitInstr
//
void visitCallInst(CallInst& CI);
};
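// Typical use (see Parallelize::runOnModule below): construct one Cilkifier
// for the Module, then call TransformFunc() on each safe parallel function,
// passing the full set of parallel functions so that visitCallInst can
// recognize which call sites are spawns.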
Cilkifier::Cilkifier(Module& M) {
// create the dummy Sync function and add it to the Module
DummySyncFunc = M.getOrInsertFunction(DummySyncFuncName, Type::VoidTy, 0);
}
void Cilkifier::TransformFunc(Function* F,
const hash_set<Function*>& _cilkFunctions,
PgmDependenceGraph& _depGraph) {
// Memoize the information for this function
cilkFunctions = &_cilkFunctions;
depGraph = &_depGraph;
// Add the marker suffix to the Function name
// This should automatically mark all calls to the function also!
F->setName(F->getName() + CilkSuffix);
// Insert sync operations for each separate spawn
visit(*F);
// TODO: Traverse the CFG in reverse postorder and eliminate redundant syncs,
// i.e., two consecutive syncs on a straight-line path with no intervening
// spawn.  (This elimination is not yet implemented here.)
}
void Cilkifier::DFSVisitInstr(Instruction* I,
Instruction* root,
hash_set<const Instruction*>& depsOfRoot)
{
assert(stmtsVisited.find(I) == stmtsVisited.end());
stmtsVisited.insert(I);
// If there is a dependence from root to I, insert Sync and return
if (depsOfRoot.find(I) != depsOfRoot.end()) {
// Insert a sync before I and stop searching along this path.
// If I is a Phi instruction, the dependence can only be an SSA dep.
// and we need to insert the sync in the predecessor on the appropriate
// incoming edge!
CallInst* syncI = 0;
if (PHINode* phiI = dyn_cast<PHINode>(I)) {
// check all operands of the Phi and insert before each one
for (unsigned i = 0, N = phiI->getNumIncomingValues(); i < N; ++i)
if (phiI->getIncomingValue(i) == root)
syncI = new CallInst(DummySyncFunc, std::vector<Value*>(), "",
phiI->getIncomingBlock(i)->getTerminator());
} else
syncI = new CallInst(DummySyncFunc, std::vector<Value*>(), "", I);
// Remember the sync for each spawn to eliminate redundant ones later
spawnToSyncsMap[cast<CallInst>(root)].insert(syncI);
return;
}
// else visit unvisited successors
if (BranchInst* brI = dyn_cast<BranchInst>(I)) {
// visit first instruction in each successor BB
for (unsigned i = 0, N = brI->getNumSuccessors(); i < N; ++i)
if (stmtsVisited.find(&brI->getSuccessor(i)->front())
== stmtsVisited.end())
DFSVisitInstr(&brI->getSuccessor(i)->front(), root, depsOfRoot);
} else
if (Instruction* nextI = I->getNext())
if (stmtsVisited.find(nextI) == stmtsVisited.end())
DFSVisitInstr(nextI, root, depsOfRoot);
}
void Cilkifier::visitCallInst(CallInst& CI)
{
assert(CI.getCalledFunction() != 0 && "Only direct calls can be spawned.");
if (cilkFunctions->find(CI.getCalledFunction()) == cilkFunctions->end())
return; // not a spawn
// Find all the outgoing memory dependences.
hash_set<const Instruction*> depsOfRoot;
for (PgmDependenceGraph::iterator DI =
depGraph->outDepBegin(CI, MemoryDeps); ! DI.fini(); ++DI)
depsOfRoot.insert(&DI->getSink()->getInstr());
// Now find all outgoing SSA dependences to the eventual non-Phi users of
// the call value (i.e., direct users that are not phis, and for any
// user that is a Phi, direct non-Phi users of that Phi, and recursively).
std::vector<const PHINode*> phiUsers;
hash_set<const PHINode*> phisSeen; // ensures we don't visit a phi twice
for (Value::use_iterator UI=CI.use_begin(), UE=CI.use_end(); UI != UE; ++UI)
if (const PHINode* phiUser = dyn_cast<PHINode>(*UI)) {
if (phisSeen.find(phiUser) == phisSeen.end()) {
phiUsers.push_back(phiUser);
phisSeen.insert(phiUser);
}
}
else
depsOfRoot.insert(cast<Instruction>(*UI));
// Now we've found the non-Phi users and immediate phi users.
// Recursively walk the phi users and add their non-phi users.
for (const PHINode* phiUser; !phiUsers.empty(); phiUsers.pop_back()) {
phiUser = phiUsers.back();
for (Value::use_const_iterator UI=phiUser->use_begin(),
UE=phiUser->use_end(); UI != UE; ++UI)
if (const PHINode* pn = dyn_cast<PHINode>(*UI)) {
if (phisSeen.find(pn) == phisSeen.end()) {
phiUsers.push_back(pn);
phisSeen.insert(pn);
}
} else
depsOfRoot.insert(cast<Instruction>(*UI));
}
// Walk paths of the CFG starting at the call instruction and insert
// one sync before the first dependence on each path, if any.
if (! depsOfRoot.empty()) {
stmtsVisited.clear(); // start a new DFS for this CallInst
assert(CI.getNext() && "Call instruction cannot be a terminator!");
DFSVisitInstr(CI.getNext(), &CI, depsOfRoot);
}
// Now, eliminate all users of the SSA value of the CallInst, i.e.,
// if the call instruction returns a value, delete the return value
// register and replace it by a stack slot.
if (CI.getType() != Type::VoidTy)
DemoteRegToStack(CI);
}
//----------------------------------------------------------------------------
// class FindParallelCalls
//
// Find all CallInst instructions that have at least one other CallInst
// that is independent. These are the instructions that can produce
// useful parallelism.
//----------------------------------------------------------------------------
class FindParallelCalls : public InstVisitor<FindParallelCalls> {
typedef hash_set<CallInst*> DependentsSet;
typedef DependentsSet::iterator Dependents_iterator;
typedef DependentsSet::const_iterator Dependents_const_iterator;
PgmDependenceGraph& depGraph; // dependence graph for the function
hash_set<Instruction*> stmtsVisited; // flags for DFS walk of depGraph
hash_map<CallInst*, bool > completed; // flags marking if a CI is done
hash_map<CallInst*, DependentsSet> dependents; // dependent CIs for each CI
void VisitOutEdges(Instruction* I,
CallInst* root,
DependentsSet& depsOfRoot);
FindParallelCalls(const FindParallelCalls &); // DO NOT IMPLEMENT
void operator=(const FindParallelCalls&); // DO NOT IMPLEMENT
public:
std::vector<CallInst*> parallelCalls;
public:
/*ctor*/ FindParallelCalls (Function& F, PgmDependenceGraph& DG);
void visitCallInst (CallInst& CI);
};
FindParallelCalls::FindParallelCalls(Function& F,
PgmDependenceGraph& DG)
: depGraph(DG)
{
// Find all CallInsts reachable from each CallInst using a recursive DFS
visit(F);
// Now we've found all CallInsts reachable from each CallInst.
// Find those CallInsts that are parallel with at least one other CallInst
// by counting total inEdges and outEdges.
unsigned long totalNumCalls = completed.size();
if (totalNumCalls == 1) {
// Special case: the function contains only a single call instruction.
// It is never treated as parallel, even if it has no dependences, because
// we do not parallelize across iterations of an enclosing loop.
//
// FIXME:
// THIS CASE IS NOT HANDLED RIGHT NOW, I.E., THERE IS NO
// PARALLELISM FOR CALLS IN DIFFERENT ITERATIONS OF A LOOP.
return;
}
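// Count, for each call, the number of dependence edges touching it in either
// direction: each path fromCI -> DI below adds one to fromCI and one to DI.
// A call is then marked parallel iff its count is < totalNumCalls - 1.
// Worked example (editorial): with exactly two calls C1 and C2 and a single
// dependence path C1 -> C2, numDeps[C1] == numDeps[C2] == 1, which is not
// less than totalNumCalls - 1 == 1, so neither call is marked parallel.
// Adding a third, fully independent call C3 raises the threshold to 2 and
// gives numDeps[C3] == 0, so all three calls are marked parallel (each now
// has at least one independent peer).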
hash_map<CallInst*, unsigned long> numDeps;
for (hash_map<CallInst*, DependentsSet>::iterator II = dependents.begin(),
IE = dependents.end(); II != IE; ++II) {
CallInst* fromCI = II->first;
numDeps[fromCI] += II->second.size();
for (Dependents_iterator DI = II->second.begin(), DE = II->second.end();
DI != DE; ++DI)
numDeps[*DI]++; // *DI can be reached from II->first
}
for (hash_map<CallInst*, DependentsSet>::iterator
II = dependents.begin(), IE = dependents.end(); II != IE; ++II)
// FIXME: Remove "- 1" when considering parallelism in loops
if (numDeps[II->first] < totalNumCalls - 1)
parallelCalls.push_back(II->first);
}
void FindParallelCalls::VisitOutEdges(Instruction* I,
CallInst* root,
DependentsSet& depsOfRoot)
{
assert(stmtsVisited.find(I) == stmtsVisited.end() && "Stmt visited twice?");
stmtsVisited.insert(I);
if (CallInst* CI = dyn_cast<CallInst>(I))
// FIXME: Ignoring parallelism in a loop. Here we're actually *ignoring*
// a self-dependence in order to get the count comparison right above.
// When we include loop parallelism, self-dependences should be included.
if (CI != root) {
// CallInst root has a path to CallInst I and any calls reachable from I
depsOfRoot.insert(CI);
if (completed[CI]) {
// We have already visited I so we know all nodes it can reach!
DependentsSet& depsOfI = dependents[CI];
depsOfRoot.insert(depsOfI.begin(), depsOfI.end());
return;
}
}
// If we reach here, we need to visit all children of I
for (PgmDependenceGraph::iterator DI = depGraph.outDepBegin(*I);
! DI.fini(); ++DI) {
Instruction* sink = &DI->getSink()->getInstr();
if (stmtsVisited.find(sink) == stmtsVisited.end())
VisitOutEdges(sink, root, depsOfRoot);
}
}
void FindParallelCalls::visitCallInst(CallInst& CI) {
if (completed[&CI])
return;
stmtsVisited.clear(); // clear flags to do a fresh DFS
// Visit all children of CI using a recursive walk through dep graph
DependentsSet& depsOfRoot = dependents[&CI];
for (PgmDependenceGraph::iterator DI = depGraph.outDepBegin(CI);
! DI.fini(); ++DI) {
Instruction* sink = &DI->getSink()->getInstr();
if (stmtsVisited.find(sink) == stmtsVisited.end())
VisitOutEdges(sink, &CI, depsOfRoot);
}
completed[&CI] = true;
}
//----------------------------------------------------------------------------
// class Parallelize
//
// (1) Find candidate parallel functions: any function F s.t.
// there is a call C1 to the function F that is followed or preceded
// by at least one other call C2 that is independent of this one
// (i.e., there is no dependence path from C1 to C2 or C2 to C1)
// (2) Label such a function F as a cilk function.
// (3) Convert every call to F to a spawn
// (4) For every function X, insert sync statements so that
// every spawn is postdominated by a sync before any statements
// with a data dependence to/from the call site for the spawn
//
//----------------------------------------------------------------------------
namespace {
class Parallelize : public ModulePass {
public:
/// Driver functions to transform a program
///
bool runOnModule(Module& M);
/// getAnalysisUsage - Modifies the module extensively, so preserves nothing.
/// Uses the DependenceGraph and the Top-down DS Graph (only to find
/// all functions called via an indirect call).
///
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TDDataStructures>();
AU.addRequired<MemoryDepAnalysis>(); // force this not to be released
AU.addRequired<PgmDependenceGraph>(); // because it is needed by this
}
};
RegisterOpt<Parallelize> X("parallel", "Parallelize program using Cilk");
}
ModulePass *llvm::createParallelizePass() { return new Parallelize(); }
bool Parallelize::runOnModule(Module& M) {
hash_set<Function*> parallelFunctions;
hash_set<Function*> safeParallelFunctions;
hash_set<const GlobalValue*> indirectlyCalled;
// If there is no main (i.e., for an incomplete program), we can do nothing.
// If there is a main, mark main as a parallel function.
Function* mainFunc = M.getMainFunction();
if (!mainFunc)
return false;
// (1) Find candidate parallel functions and mark them as Cilk functions
for (Module::iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI)
if (! FI->isExternal()) {
Function* F = FI;
DSGraph& tdg = getAnalysis<TDDataStructures>().getDSGraph(*F);
// All the hard analysis work gets done here!
FindParallelCalls finder(*F,
getAnalysis<PgmDependenceGraph>().getGraph(*F));
/* getAnalysis<MemoryDepAnalysis>().getGraph(*F)); */
// Now we know which call instructions are useful to parallelize.
// Remember those callee functions.
for (std::vector<CallInst*>::iterator
CII = finder.parallelCalls.begin(),
CIE = finder.parallelCalls.end(); CII != CIE; ++CII) {
// Check if this is a direct call...
if ((*CII)->getCalledFunction() != NULL) {
// direct call: if this is to a non-external function,
// mark it as a parallelizable function
if (! (*CII)->getCalledFunction()->isExternal())
parallelFunctions.insert((*CII)->getCalledFunction());
} else {
// Indirect call: mark all potential callees as bad
std::vector<GlobalValue*> callees =
tdg.getNodeForValue((*CII)->getCalledValue())
.getNode()->getGlobals();
indirectlyCalled.insert(callees.begin(), callees.end());
}
}
}
// Remove all indirectly called functions from the list of Cilk functions.
for (hash_set<Function*>::iterator PFI = parallelFunctions.begin(),
PFE = parallelFunctions.end(); PFI != PFE; ++PFI)
if (indirectlyCalled.count(*PFI) == 0)
safeParallelFunctions.insert(*PFI);
#undef CAN_USE_BIND1ST_ON_REFERENCE_TYPE_ARGS
#ifdef CAN_USE_BIND1ST_ON_REFERENCE_TYPE_ARGS
// Use this indecipherable STLese because erase invalidates iterators.
// Otherwise we have to copy sets as above.
hash_set<Function*>::iterator extrasBegin =
std::remove_if(parallelFunctions.begin(), parallelFunctions.end(),
compose1(std::bind2nd(std::greater<int>(), 0),
bind_obj(&indirectlyCalled,
&hash_set<const GlobalValue*>::count)));
parallelFunctions.erase(extrasBegin, parallelFunctions.end());
#endif
// If there are no parallel functions, we can just give up.
if (safeParallelFunctions.empty())
return false;
// Add main as a parallel function since Cilk requires this.
safeParallelFunctions.insert(mainFunc);
// (2,3) Transform each Cilk function and all its calls simply by
// adding a unique suffix to the function name.
// This should identify both functions and calls to such functions
// to the code generator.
// (4) Also, insert calls to sync at appropriate points.
Cilkifier cilkifier(M);
for (hash_set<Function*>::iterator CFI = safeParallelFunctions.begin(),
CFE = safeParallelFunctions.end(); CFI != CFE; ++CFI) {
cilkifier.TransformFunc(*CFI, safeParallelFunctions,
getAnalysis<PgmDependenceGraph>().getGraph(**CFI));
/* getAnalysis<MemoryDepAnalysis>().getGraph(**CFI)); */
}
return true;
}

View File

@ -1,258 +0,0 @@
//===- PgmDependenceGraph.cpp - Enumerate PDG for a function ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The Program Dependence Graph (PDG) for a single function represents all
// data and control dependences for the function. This file provides an
// iterator to enumerate all these dependences. In particular, it enumerates:
//
// -- Data dependences on memory locations, computed using the
// MemoryDepAnalysis pass;
// -- Data dependences on SSA registers, directly from Def-Use edges of Values;
// -- Control dependences, computed using postdominance frontiers
// (NOT YET IMPLEMENTED).
//
// Note that this file does not create an explicit dependence graph --
// it only provides an iterator to traverse the PDG conceptually.
// The MemoryDepAnalysis does build an explicit graph, which is used internally
// here. That graph could be augmented with the other dependences above if
// desired, but for most uses there will be little need to do that.
//
//===----------------------------------------------------------------------===//
#include "PgmDependenceGraph.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Function.h"
#include <iostream>
namespace llvm {
//----------------------------------------------------------------------------
// class DepIterState
//----------------------------------------------------------------------------
const DepIterState::IterStateFlags DepIterState::NoFlag = 0x0;
const DepIterState::IterStateFlags DepIterState::MemDone = 0x1;
const DepIterState::IterStateFlags DepIterState::SSADone = 0x2;
const DepIterState::IterStateFlags DepIterState::AllDone = 0x4;
const DepIterState::IterStateFlags DepIterState::FirstTimeFlag= 0x8;
// Find the first memory dependence for the current Mem In/Out iterators.
// Sets dep to that dependence and returns true if one is found.
//
bool DepIterState::SetFirstMemoryDep()
{
if (! (depFlags & MemoryDeps))
return false;
bool doIncomingDeps = dep.getDepType() & IncomingFlag;
if (( doIncomingDeps && memDepIter == memDepGraph->inDepEnd( *depNode)) ||
(!doIncomingDeps && memDepIter == memDepGraph->outDepEnd(*depNode)))
{
iterFlags |= MemDone;
return false;
}
dep = *memDepIter; // simple copy from dependence in memory DepGraph
return true;
}
// Find the first valid data dependence for the current SSA In/Out iterators.
// A valid data dependence is one that is to/from an Instruction.
// E.g., an SSA edge from a formal parameter is not a valid dependence.
// Sets dep to that dependence and returns true if a valid one is found.
// Returns false and leaves dep unchanged otherwise.
//
bool DepIterState::SetFirstSSADep()
{
if (! (depFlags & SSADeps))
return false;
bool doIncomingDeps = dep.getDepType() & IncomingFlag;
Instruction* firstTarget = NULL;
// Increment the In or Out iterator till it runs out or we find a valid dep
if (doIncomingDeps)
for (Instruction::op_iterator E = depNode->getInstr().op_end();
ssaInEdgeIter != E &&
(firstTarget = dyn_cast<Instruction>(ssaInEdgeIter))== NULL; )
++ssaInEdgeIter;
else
for (Value::use_iterator E = depNode->getInstr().use_end();
ssaOutEdgeIter != E &&
(firstTarget = dyn_cast<Instruction>(*ssaOutEdgeIter)) == NULL; )
++ssaOutEdgeIter;
// If the iterator ran out before we found a valid dep, there isn't one.
if (!firstTarget)
{
iterFlags |= SSADone;
return false;
}
// Create a simple dependence object to represent this SSA dependence.
dep = Dependence(memDepGraph->getNode(*firstTarget, /*create*/ true),
TrueDependence, doIncomingDeps);
return true;
}
DepIterState::DepIterState(DependenceGraph* _memDepGraph,
Instruction& I,
bool incomingDeps,
PDGIteratorFlags whichDeps)
: memDepGraph(_memDepGraph),
depFlags(whichDeps),
iterFlags(NoFlag)
{
depNode = memDepGraph->getNode(I, /*create*/ true);
if (incomingDeps)
{
if (whichDeps & MemoryDeps) memDepIter= memDepGraph->inDepBegin(*depNode);
if (whichDeps & SSADeps) ssaInEdgeIter = I.op_begin();
/* Initialize control dependence iterator here. */
}
else
{
if (whichDeps & MemoryDeps) memDepIter=memDepGraph->outDepBegin(*depNode);
if (whichDeps & SSADeps) ssaOutEdgeIter = I.use_begin();
/* Initialize control dependence iterator here. */
}
// Set the dependence to the first of a memory dep or an SSA dep
// and set the done flag if either is found. Otherwise, set the
// init flag to indicate that the iterators have just been initialized.
//
if (!SetFirstMemoryDep() && !SetFirstSSADep())
iterFlags |= AllDone;
else
iterFlags |= FirstTimeFlag;
}
// Helper function for ++ operator that bumps iterator by 1 (to next
// dependence) and resets the dep field to represent the new dependence.
//
void DepIterState::Next()
{
// firstMemDone and firstSsaDone are used to indicate when the memory or
// SSA iterators just ran out, or when this is the very first increment.
// In either case, the next iterator (if any) should not be incremented.
//
bool firstMemDone = iterFlags & FirstTimeFlag;
bool firstSsaDone = iterFlags & FirstTimeFlag;
bool doIncomingDeps = dep.getDepType() & IncomingFlag;
if (depFlags & MemoryDeps && ! (iterFlags & MemDone))
{
iterFlags &= ~FirstTimeFlag; // clear "firstTime" flag
++memDepIter;
if (SetFirstMemoryDep())
return;
firstMemDone = true; // flags that we _just_ rolled over
}
if (depFlags & SSADeps && ! (iterFlags & SSADone))
{
// Don't increment the SSA iterator if we either just rolled over from
// the memory dep iterator, or if the SSA iterator is already done.
iterFlags &= ~FirstTimeFlag; // clear "firstTime" flag
if (! firstMemDone)
if (doIncomingDeps) ++ssaInEdgeIter;
else ++ssaOutEdgeIter;
if (SetFirstSSADep())
return;
firstSsaDone = true; // flags if we just rolled over
}
if ((depFlags & ControlDeps) != 0)
{
assert(0 && "Cannot handle control deps");
// iterFlags &= ~FirstTimeFlag; // clear "firstTime" flag
}
// This iterator is now complete.
iterFlags |= AllDone;
}
//----------------------------------------------------------------------------
// class PgmDependenceGraph
//----------------------------------------------------------------------------
// MakeIterator -- Create and initialize an iterator as specified.
//
PDGIterator PgmDependenceGraph::MakeIterator(Instruction& I,
bool incomingDeps,
PDGIteratorFlags whichDeps)
{
assert(memDepGraph && "Function not initialized!");
return PDGIterator(new DepIterState(memDepGraph, I, incomingDeps, whichDeps));
}
void PgmDependenceGraph::printOutgoingSSADeps(Instruction& I,
std::ostream &O)
{
iterator SI = this->outDepBegin(I, SSADeps);
iterator SE = this->outDepEnd(I, SSADeps);
if (SI == SE)
return;
O << "\n Outgoing SSA dependences:\n";
for ( ; SI != SE; ++SI)
{
O << "\t";
SI->print(O);
O << " to instruction:";
O << SI->getSink()->getInstr();
}
}
void PgmDependenceGraph::print(std::ostream &O, const Module*) const
{
MemoryDepAnalysis& graphSet = getAnalysis<MemoryDepAnalysis>();
// TEMPORARY LOOP
for (hash_map<Function*, DependenceGraph*>::iterator
I = graphSet.funcMap.begin(), E = graphSet.funcMap.end();
I != E; ++I)
{
Function* func = I->first;
DependenceGraph* depGraph = I->second;
const_cast<PgmDependenceGraph*>(this)->runOnFunction(*func);
O << "DEPENDENCE GRAPH FOR FUNCTION " << func->getName() << ":\n";
for (Function::iterator BB=func->begin(), FE=func->end(); BB != FE; ++BB)
for (BasicBlock::iterator II=BB->begin(), IE=BB->end(); II !=IE; ++II)
{
DepGraphNode* dgNode = depGraph->getNode(*II, /*create*/ true);
dgNode->print(O);
const_cast<PgmDependenceGraph*>(this)->printOutgoingSSADeps(*II, O);
}
} // END TEMPORARY LOOP
}
void PgmDependenceGraph::dump() const
{
this->print(std::cerr);
}
static RegisterAnalysis<PgmDependenceGraph>
Z("pgmdep", "Enumerate Program Dependence Graph (data and control)");
} // End llvm namespace

View File

@ -1,301 +0,0 @@
//===- PgmDependenceGraph.h - Enumerate the PDG for a function --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The Program Dependence Graph (PDG) for a single function represents all
// data and control dependences for the function. This file provides an
// iterator to enumerate all these dependences. In particular, it enumerates:
//
// -- Data dependences on memory locations, computed using the
// MemoryDepAnalysis pass;
// -- Data dependences on SSA registers, directly from Def-Use edges of Values;
// -- Control dependences, computed using postdominance frontiers
// (NOT YET IMPLEMENTED).
//
// Note that this file does not create an explicit dependence graph --
// it only provides an iterator to traverse the PDG conceptually.
// The MemoryDepAnalysis does build an explicit graph, which is used internally
// here. That graph could be augmented with the other dependences above if
// desired, but for most uses there will be little need to do that.
//
// Key Classes:
//
// enum PDGIteratorFlags -- Specify which dependences to enumerate.
//
// class PDGIterator -- The PDG iterator. This is essentially like a
// pointer to class Dependence, but doesn't explicitly
// construct a Dependence object for each dependence.
//
// class PgmDependenceGraph -- Interface to obtain PDGIterators for each
// instruction.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_PGMDEPENDENCEGRAPH_H
#define LLVM_ANALYSIS_PGMDEPENDENCEGRAPH_H
#include "MemoryDepAnalysis.h"
/* #include "llvm/Analysis/PostDominators.h" -- see below */
#include "llvm/Instruction.h"
#include "llvm/Pass.h"
#include "llvm/ADT/iterator"
namespace llvm {
class DSGraph;
class DependenceGraph;
class PgmDependenceGraph;
//---------------------------------------------------------------------------
/// enum PDGIteratorFlags - specify which dependences incident on a statement
/// are to be enumerated: Memory deps, SSA deps, Control deps, or any
/// combination thereof.
///
enum PDGIteratorFlags {
MemoryDeps = 0x1, // load/store/call deps
SSADeps = 0x2, // SSA deps (true)
ControlDeps = /* 0x4*/ 0x0, // control dependences
AllDataDeps = MemoryDeps | SSADeps, // shorthand for data deps
AllDeps = MemoryDeps | SSADeps | ControlDeps // shorthand for all three
};
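/// For example, outDepBegin(I, MemoryDeps) enumerates only the load/store/call
/// dependences going out of I, whereas the default AllDeps enumerates memory
/// and SSA dependences together (control dependences are currently disabled,
/// since ControlDeps is defined as 0x0 above).
///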
//---------------------------------------------------------------------------
/// class DepIterState - an internal implementation detail.
/// It is exposed here only to give inlinable access to the field dep,
/// which is the representation of the current dependence pointed to by
/// a PgmDependenceGraph::iterator.
///
class DepIterState {
private:
typedef char IterStateFlags;
static const IterStateFlags NoFlag, MemDone, SSADone, AllDone, FirstTimeFlag;
public:
DepGraphNode* depNode; // the node being enumerated
DependenceGraph::iterator memDepIter; // pointer to current memory dep
Instruction::op_iterator ssaInEdgeIter; // pointer to current SSA in-dep
Value::use_iterator ssaOutEdgeIter; // pointer to current SSA out-dep
DependenceGraph* memDepGraph; // the core dependence graph
Dependence dep; // the "current" dependence
PDGIteratorFlags depFlags:8; // which deps are we enumerating?
IterStateFlags iterFlags:8; // marking where the iter stands
DepIterState(DependenceGraph* _memDepGraph,
Instruction& I,
bool incomingDeps,
PDGIteratorFlags whichDeps);
bool operator==(const DepIterState& S) {
assert(memDepGraph == S.memDepGraph &&
"Incompatible iterators! This is a probable sign of something BAD.");
return (iterFlags == S.iterFlags &&
dep == S.dep && depFlags == S.depFlags && depNode == S.depNode &&
memDepIter == S.memDepIter && ssaInEdgeIter == S.ssaInEdgeIter &&
ssaOutEdgeIter == S.ssaOutEdgeIter);
}
// Is the iteration completely done?
//
bool done() const { return iterFlags & AllDone; }
/// Next - Bump this iterator logically by 1 (to next dependence) and reset
/// the dep field to represent the new dependence if there is one.
/// Set done = true otherwise.
///
void Next();
/// SetFirstMemoryDep - Find the first memory dependence for the current Mem
/// In/Out iterators. Sets dep to that dependence and returns true if one is
/// found. Returns false and leaves dep unchanged otherwise.
///
bool SetFirstMemoryDep();
/// SetFirstSSADep - Find the next valid data dependence for the current SSA
/// In/Out iterators. A valid data dependence is one that is to/from an
/// Instruction. E.g., an SSA edge from a formal parameter is not a valid
/// dependence. Sets dep to that dependence and returns true if a valid one is
/// found. Returns false and leaves dep unchanged otherwise.
///
bool SetFirstSSADep();
};
//---------------------------------------------------------------------------
/// PDGIterator Class - represents a pointer to a single dependence in the
/// program dependence graph. It is essentially like a pointer to an object of
/// class Dependence but it is much more efficient to retrieve information about
/// the dependence directly rather than constructing the equivalent Dependence
/// object (since that object is normally not constructed for SSA def-use
/// dependences).
///
class PDGIterator: public forward_iterator<Dependence, ptrdiff_t> {
DepIterState* istate;
#if 0
/*copy*/ PDGIterator (const PDGIterator& I); // do not implement!
PDGIterator& operator= (const PDGIterator& I); // do not implement!
/*copy*/ PDGIterator (PDGIterator& I) : istate(I.istate) {
I.istate = NULL; // ensure this is not deleted twice.
}
#endif
friend class PgmDependenceGraph;
public:
typedef PDGIterator _Self;
PDGIterator(DepIterState* _istate) : istate(_istate) {}
~PDGIterator() { delete istate; }
PDGIterator(const PDGIterator& I) :istate(new DepIterState(*I.istate)) {}
PDGIterator& operator=(const PDGIterator& I) {
if (istate) delete istate;
istate = new DepIterState(*I.istate);
return *this;
}
/// fini - check if the iteration is complete
///
bool fini() const { return !istate || istate->done(); }
// Retrieve the underlying Dependence. Returns NULL if fini().
//
Dependence* operator*() const { return fini() ? NULL : &istate->dep; }
Dependence* operator->() const { assert(!fini()); return &istate->dep; }
// Increment the iterator
//
_Self& operator++() { if (!fini()) istate->Next(); return *this;}
_Self& operator++(int); // do not implement!
// Equality comparison: a "null" state should compare equal to done
// This is efficient for comparing with "end" or with itself, but could
// be quite inefficient for other cases.
//
bool operator==(const PDGIterator& I) const {
if (I.istate == NULL) // most common case: iter == end()
return (istate == NULL || istate->done());
if (istate == NULL)
return (I.istate == NULL || I.istate->done());
return (*istate == *I.istate);
}
bool operator!=(const PDGIterator& I) const {
return ! (*this == I);
}
};
///---------------------------------------------------------------------------
/// class PgmDependenceGraph:
///
/// This pass enumerates dependences incident on each instruction in a function.
/// It can be made a FunctionPass once a Pass (such as Parallelize) is
/// allowed to use a FunctionPass such as this one.
///---------------------------------------------------------------------------
class PgmDependenceGraph: public ModulePass {
/// Information about the function being analyzed.
///
DependenceGraph* memDepGraph;
// print helper function.
void printOutgoingSSADeps(Instruction& I, std::ostream &O);
/// MakeIterator - creates and initializes an iterator as specified.
///
PDGIterator MakeIterator(Instruction& I,
bool incomingDeps,
PDGIteratorFlags whichDeps);
/// MakeIterator - creates a null iterator representing end-of-iteration.
///
PDGIterator MakeIterator() { return PDGIterator(NULL); }
friend class PDGIterator;
friend class DepIterState;
public:
typedef PDGIterator iterator;
/* typedef PDGIterator<const Dependence> const iterator; */
public:
PgmDependenceGraph() : memDepGraph(NULL) {}
~PgmDependenceGraph() {}
/// Iterators to enumerate the program dependence graph for a function.
/// Note that this does not provide "end" iterators to check for completion.
/// Instead, just use iterator::fini() or iterator::operator*() == NULL
///
iterator inDepBegin(Instruction& I, PDGIteratorFlags whichDeps = AllDeps) {
return MakeIterator(I, /*inDeps*/ true, whichDeps);
}
iterator inDepEnd (Instruction& I, PDGIteratorFlags whichDeps = AllDeps) {
return MakeIterator();
}
iterator outDepBegin(Instruction& I, PDGIteratorFlags whichDeps = AllDeps) {
return MakeIterator(I, /*inDeps*/ false, whichDeps);
}
iterator outDepEnd (Instruction& I, PDGIteratorFlags whichDeps = AllDeps) {
return MakeIterator();
}
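/// Typical enumeration idiom (as used by the Parallelize pass):
///
///   for (PgmDependenceGraph::iterator DI = PDG.outDepBegin(I);
///        !DI.fini(); ++DI)
///     visit(DI->getSink()->getInstr());
///
/// where PDG is a PgmDependenceGraph and visit() stands for whatever the
/// client does with each dependence sink.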
//------------------------------------------------------------------------
/// TEMPORARY FUNCTIONS TO MAKE THIS A MODULE PASS ---
/// These functions will go away once this class becomes a FunctionPass.
/// Driver function to compute dependence graphs for every function.
///
bool runOnModule(Module& M) { return true; }
/// getGraph() -- Retrieve the pgm dependence graph for a function.
/// This is temporary and will go away once this is a FunctionPass.
/// At that point, this class itself will be the PgmDependenceGraph you want.
///
PgmDependenceGraph& getGraph(Function& F) {
Visiting(F);
return *this;
}
private:
void Visiting(Function& F) {
memDepGraph = &getAnalysis<MemoryDepAnalysis>().getGraph(F);
}
public:
///----END TEMPORARY FUNCTIONS---------------------------------------------
/// This initializes the program dependence graph iterator for a function.
///
bool runOnFunction(Function& func) {
Visiting(func);
return true;
}
/// getAnalysisUsage - This does not modify anything.
/// It uses the Memory Dependence Analysis pass.
/// It needs to use the PostDominanceFrontier pass, but cannot because
/// that is a FunctionPass. This means control dependences are not enumerated.
///
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<MemoryDepAnalysis>();
/* AU.addRequired<PostDominanceFrontier>(); */
}
/// Debugging support methods
///
void print(std::ostream &O, const Module* = 0) const;
void dump() const;
};
} // End llvm namespace
#endif