[Orc] New JIT APIs.

This patch adds a new set of JIT APIs to LLVM. The aim of these new APIs is to
cleanly support a wider range of JIT use cases in LLVM, and to encourage the
development and contribution of re-usable LLVM JIT infrastructure.

These APIs are intended to live alongside the MCJIT APIs, and should not affect
existing clients.

Included in this patch:

1) New headers in include/llvm/ExecutionEngine/Orc that provide a set of
   components for building JIT infrastructure.
   Implementation code for these headers lives in lib/ExecutionEngine/Orc.

2) A prototype re-implementation of MCJIT (OrcMCJITReplacement) built out of the
   new components.

3) Minor changes to RTDyldMemoryManager needed to support the new components.
   These changes should not impact existing clients.

4) A new flag for lli, -use-orcmcjit, which will cause lli to use the
   OrcMCJITReplacement class as its underlying execution engine, rather than
   MCJIT itself.

Tests to follow shortly.

Special thanks to Michael Ilseman, Pete Cooper, David Blaikie, Eric Christopher,
Justin Bogner, and Jim Grosbach for extensive feedback and discussion.



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@226940 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Lang Hames
Date:   2015-01-23 21:25:00 +00:00
Parent: d5dc4cff6a
Commit: 63cc4f56a9

29 changed files with 2428 additions and 8 deletions

View File

@ -143,6 +143,12 @@ protected:
std::string *ErrorStr,
std::unique_ptr<RTDyldMemoryManager> MCJMM,
std::unique_ptr<TargetMachine> TM);
static ExecutionEngine *(*OrcMCJITReplacementCtor)(
std::string *ErrorStr,
std::unique_ptr<RTDyldMemoryManager> OrcJMM,
std::unique_ptr<TargetMachine> TM);
static ExecutionEngine *(*InterpCtor)(std::unique_ptr<Module> M,
std::string *ErrorStr);
@ -464,6 +470,7 @@ public:
}
protected:
ExecutionEngine() : EEState(*this) {}
explicit ExecutionEngine(std::unique_ptr<Module> M);
void emitGlobals();
@ -501,11 +508,15 @@ private:
std::string MCPU;
SmallVector<std::string, 4> MAttrs;
bool VerifyModules;
bool UseOrcMCJITReplacement;
/// InitEngine - Does the common initialization of default options.
void InitEngine();
public:
/// Default constructor for EngineBuilder.
EngineBuilder();
/// Constructor for EngineBuilder.
EngineBuilder(std::unique_ptr<Module> M);
@ -590,6 +601,11 @@ public:
return *this;
}
/// \brief Use OrcMCJITReplacement instead of MCJIT. Off by default.
void setUseOrcMCJITReplacement(bool UseOrcMCJITReplacement) {
this->UseOrcMCJITReplacement = UseOrcMCJITReplacement;
}
TargetMachine *selectTarget();
/// selectTarget - Pick a target either via -march or by guessing the native
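A rough client-side sketch of opting in to the new engine via EngineBuilder (not part of this patch; the module and error string are assumed, and the Orc library must be linked in, e.g. via the force-linking header added later in this patch):

// Sketch only: select OrcMCJITReplacement instead of MCJIT.
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/IR/Module.h"
#include <memory>
#include <string>

llvm::ExecutionEngine *buildEngine(std::unique_ptr<llvm::Module> M,
                                   std::string &Err) {
  llvm::EngineBuilder EB(std::move(M));
  EB.setErrorStr(&Err);
  EB.setUseOrcMCJITReplacement(true); // New in this patch; off by default.
  return EB.create();                 // Falls back to MCJIT when the flag is off.
}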

View File

@ -0,0 +1,63 @@
//===- ObjectMemoryBuffer.h - SmallVector-backed MemoryBuffer --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares a wrapper class to hold the memory into which an
// object will be generated.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_OBJECTMEMORYBUFFER_H
#define LLVM_EXECUTIONENGINE_OBJECTMEMORYBUFFER_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
/// \brief SmallVector-backed MemoryBuffer instance.
///
/// This class enables efficient construction of MemoryBuffers from SmallVector
/// instances. This is useful for MCJIT and Orc, where object files are streamed
/// into SmallVectors, then inspected using ObjectFile (which takes a
/// MemoryBuffer).
class ObjectMemoryBuffer : public MemoryBuffer {
public:
/// \brief Construct an ObjectMemoryBuffer from the given SmallVector r-value.
///
/// FIXME: It'd be nice for this to be a non-templated constructor taking a
/// SmallVectorImpl here instead of a templated one taking a SmallVector<N>,
/// but SmallVector's move-construction/assignment currently only take
/// SmallVectors. If/when that is fixed we can simplify this constructor and
/// the following one.
ObjectMemoryBuffer(SmallVectorImpl<char> &&SV)
: SV(std::move(SV)), BufferName("<in-memory object>") {
init(this->SV.begin(), this->SV.end(), false);
}
/// \brief Construct a named ObjectMemoryBuffer from the given SmallVector
/// r-value and StringRef.
ObjectMemoryBuffer(SmallVectorImpl<char> &&SV, StringRef Name)
: SV(std::move(SV)), BufferName(Name) {
init(this->SV.begin(), this->SV.end(), false);
}
const char* getBufferIdentifier() const override { return BufferName.c_str(); }
BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; }
private:
SmallVector<char, 0> SV;
std::string BufferName;
};
} // namespace llvm
#endif
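A minimal sketch of the streaming pattern described above (the object-emission step is elided; this mirrors how SimpleCompiler uses the class later in this patch, and the buffer name is illustrative):

// Sketch only: stream object bytes into a SmallVector, then wrap them so that
// ObjectFile can inspect them.
SmallVector<char, 0> ObjBufferSV;
raw_svector_ostream ObjStream(ObjBufferSV);
// ... emit an object file into ObjStream (e.g. via TargetMachine::addPassesToEmitMC) ...
ObjStream.flush();
std::unique_ptr<MemoryBuffer> ObjBuffer(
    new ObjectMemoryBuffer(std::move(ObjBufferSV), "<sketch object>"));
ErrorOr<std::unique_ptr<object::ObjectFile>> Obj =
    object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());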

View File

@ -0,0 +1,44 @@
//===-- CloneSubModule.h - Utilities for extracting sub-modules -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains utilities for extracting sub-modules. Useful for breaking up modules
// for lazy jitting.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_CLONESUBMODULE_H
#define LLVM_EXECUTIONENGINE_ORC_CLONESUBMODULE_H
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <functional>
namespace llvm {
class Function;
class GlobalVariable;
class Module;
typedef std::function<void(GlobalVariable &, const GlobalVariable &,
ValueToValueMapTy &)> HandleGlobalVariableFtor;
typedef std::function<void(Function &, const Function &, ValueToValueMapTy &)>
HandleFunctionFtor;
void copyGVInitializer(GlobalVariable &New, const GlobalVariable &Orig,
ValueToValueMapTy &VMap);
void copyFunctionBody(Function &New, const Function &Orig,
ValueToValueMapTy &VMap);
std::unique_ptr<Module>
CloneSubModule(const Module &M, HandleGlobalVariableFtor HandleGlobalVariable,
HandleFunctionFtor HandleFunction, bool KeepInlineAsm);
}
#endif // LLVM_EXECUTIONENGINE_ORC_CLONESUBMODULE_H
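A rough sketch of how these hooks compose (M is an assumed existing Module; whether empty handlers yield a declarations-only clone depends on the implementation in lib/ExecutionEngine/Orc, so treat this as illustrative):

// Sketch only: full clone vs. a clone with user-controlled copying.
std::unique_ptr<Module> FullClone =
    CloneSubModule(M, copyGVInitializer, copyFunctionBody,
                   /*KeepInlineAsm=*/true);

auto SkipInit = [](GlobalVariable &, const GlobalVariable &,
                   ValueToValueMapTy &) {};
auto SkipBody = [](Function &, const Function &, ValueToValueMapTy &) {};
std::unique_ptr<Module> DeclsOnly =
    CloneSubModule(M, SkipInit, SkipBody, /*KeepInlineAsm=*/false);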

View File

@ -0,0 +1,263 @@
//===- CompileOnDemandLayer.h - Compile each function on demand -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// JIT layer for breaking up modules and inserting callbacks to allow
// individual functions to be compiled on demand.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
#include "IndirectionUtils.h"
namespace llvm {
/// @brief Compile-on-demand layer.
///
/// Modules added to this layer have their calls indirected, and are then
/// broken up into a set of single-function modules, each of which is added
/// to the layer below in a singleton set. The lower layer can be any layer that
/// accepts IR module sets.
///
/// It is expected that this layer will frequently be used on top of a
/// LazyEmittingLayer. The combination of the two ensures that each function is
/// compiled only when it is first called.
template <typename BaseLayerT> class CompileOnDemandLayer {
public:
/// @brief Lookup helper that provides compatibility with the classic
/// static-compilation symbol resolution process.
///
/// The CompileOnDemand (COD) layer splits modules up into multiple
/// sub-modules, each held in its own llvm::Module instance, in order to
/// support lazy compilation. When a module that contains private symbols is
/// broken up, symbol linkage changes may be required to enable access to
/// "private" data that now resides in a different llvm::Module instance. To
/// retain expected symbol resolution behavior for clients of the COD layer,
/// the CODScopedLookup class uses a two-tiered lookup system to resolve
/// symbols. Lookup first scans sibling modules that were split from the same
/// original module (logical-module scoped lookup), then scans all other
/// modules that have been added to the lookup scope (logical-dylib scoped
/// lookup).
class CODScopedLookup {
private:
typedef typename BaseLayerT::ModuleSetHandleT BaseLayerModuleSetHandleT;
typedef std::vector<BaseLayerModuleSetHandleT> SiblingHandlesList;
typedef std::list<SiblingHandlesList> PseudoDylibModuleSetHandlesList;
public:
/// @brief Handle for a logical module.
typedef typename PseudoDylibModuleSetHandlesList::iterator LMHandle;
/// @brief Construct a scoped lookup.
CODScopedLookup(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
/// @brief Start a new context for a single logical module.
LMHandle createLogicalModule() {
Handles.push_back(SiblingHandlesList());
return std::prev(Handles.end());
}
/// @brief Add a concrete Module's handle to the given logical Module's
/// lookup scope.
void addToLogicalModule(LMHandle LMH, BaseLayerModuleSetHandleT H) {
LMH->push_back(H);
}
/// @brief Remove a logical Module from the CODScopedLookup entirely.
void removeLogicalModule(LMHandle LMH) { Handles.erase(LMH); }
/// @brief Look up a symbol in this context.
uint64_t lookup(LMHandle LMH, const std::string &Name) {
if (uint64_t Addr = lookupOnlyIn(LMH, Name))
return Addr;
for (auto I = Handles.begin(), E = Handles.end(); I != E; ++I)
if (I != LMH)
if (uint64_t Addr = lookupOnlyIn(I, Name))
return Addr;
return 0;
}
private:
uint64_t lookupOnlyIn(LMHandle LMH, const std::string &Name) {
for (auto H : *LMH)
if (uint64_t Addr = BaseLayer.lookupSymbolAddressIn(H, Name, false))
return Addr;
return 0;
}
BaseLayerT &BaseLayer;
PseudoDylibModuleSetHandlesList Handles;
};
private:
typedef typename BaseLayerT::ModuleSetHandleT BaseLayerModuleSetHandleT;
typedef std::vector<BaseLayerModuleSetHandleT> BaseLayerModuleSetHandleListT;
struct ModuleSetInfo {
// Symbol lookup - just one for the whole module set.
std::shared_ptr<CODScopedLookup> Lookup;
// Logical module handles.
std::vector<typename CODScopedLookup::LMHandle> LMHandles;
// Persistent manglers - one per TU.
std::vector<PersistentMangler> PersistentManglers;
// Symbol resolution callback handlers - one per TU.
std::vector<std::unique_ptr<JITResolveCallbackHandler>>
JITResolveCallbackHandlers;
// List of vectors of module set handles:
// One vector per logical module - each vector holds the handles for the
// exploded modules for that logical module in the base layer.
BaseLayerModuleSetHandleListT BaseLayerModuleSetHandles;
ModuleSetInfo(std::shared_ptr<CODScopedLookup> Lookup)
: Lookup(std::move(Lookup)) {}
void releaseResources(BaseLayerT &BaseLayer) {
for (auto LMH : LMHandles)
Lookup->removeLogicalModule(LMH);
for (auto H : BaseLayerModuleSetHandles)
BaseLayer.removeModuleSet(H);
}
};
typedef std::list<ModuleSetInfo> ModuleSetInfoListT;
public:
/// @brief Handle to a set of loaded modules.
typedef typename ModuleSetInfoListT::iterator ModuleSetHandleT;
/// @brief Convenience typedef for callback inserter.
typedef std::function<void(Module&, JITResolveCallbackHandler&)>
InsertCallbackAsmFtor;
/// @brief Construct a compile-on-demand layer instance.
CompileOnDemandLayer(BaseLayerT &BaseLayer,
InsertCallbackAsmFtor InsertCallbackAsm)
: BaseLayer(BaseLayer), InsertCallbackAsm(InsertCallbackAsm) {}
/// @brief Add a module to the compile-on-demand layer.
template <typename ModuleSetT>
ModuleSetHandleT addModuleSet(ModuleSetT Ms,
std::unique_ptr<RTDyldMemoryManager> MM) {
const char *JITAddrSuffix = "$orc_addr";
const char *JITImplSuffix = "$orc_impl";
// Create a symbol lookup context and ModuleSetInfo for this module set.
auto DylibLookup = std::make_shared<CODScopedLookup>(BaseLayer);
ModuleSetHandleT H =
ModuleSetInfos.insert(ModuleSetInfos.end(), ModuleSetInfo(DylibLookup));
ModuleSetInfo &MSI = ModuleSetInfos.back();
// Process each of the modules in this module set. All modules share the
// same lookup context, but each will get its own TU lookup context.
for (auto &M : Ms) {
// Create a TU lookup context for this module.
auto LMH = DylibLookup->createLogicalModule();
MSI.LMHandles.push_back(LMH);
// Create a persistent mangler for this module.
MSI.PersistentManglers.emplace_back(*M->getDataLayout());
// Make all calls to functions defined in this module indirect.
JITIndirections Indirections =
makeCallsDoubleIndirect(*M, [](const Function &) { return true; },
JITImplSuffix, JITAddrSuffix);
// Then carve up the module into a bunch of single-function modules.
std::vector<std::unique_ptr<Module>> ExplodedModules =
explode(*M, Indirections);
// Add a resolve-callback handler for this module to look up symbol
// addresses when requested via a callback.
MSI.JITResolveCallbackHandlers.push_back(
createCallbackHandlerFromJITIndirections(
Indirections, MSI.PersistentManglers.back(),
[=](StringRef S) { return DylibLookup->lookup(LMH, S); }));
// Insert callback asm code into the first module.
InsertCallbackAsm(*ExplodedModules[0],
*MSI.JITResolveCallbackHandlers.back());
// Now we need to take each of the extracted Modules and add them to the
// base layer. Each Module will be added individually to make sure they
// can be compiled separately, and each will get its own lookaside
// memory manager with lookup functors that resolve symbols in sibling
// modules first.
for (auto &M : ExplodedModules) {
std::vector<std::unique_ptr<Module>> MSet;
MSet.push_back(std::move(M));
BaseLayerModuleSetHandleT H = BaseLayer.addModuleSet(
std::move(MSet),
createLookasideRTDyldMM<SectionMemoryManager>(
[=](const std::string &Name) {
if (uint64_t Addr = DylibLookup->lookup(LMH, Name))
return Addr;
return getSymbolAddress(Name, true);
},
[=](const std::string &Name) {
return DylibLookup->lookup(LMH, Name);
}));
DylibLookup->addToLogicalModule(LMH, H);
MSI.BaseLayerModuleSetHandles.push_back(H);
}
initializeFuncAddrs(*MSI.JITResolveCallbackHandlers.back(), Indirections,
MSI.PersistentManglers.back(), [=](StringRef S) {
return DylibLookup->lookup(LMH, S);
});
}
return H;
}
/// @brief Remove the module represented by the given handle.
///
/// This will remove all modules in the layers below that were derived from
/// the module represented by H.
void removeModuleSet(ModuleSetHandleT H) {
H->releaseResources(BaseLayer);
ModuleSetInfos.erase(H);
}
/// @brief Get the address of a symbol provided by this layer, or some layer
/// below this one.
uint64_t getSymbolAddress(const std::string &Name, bool ExportedSymbolsOnly) {
return BaseLayer.getSymbolAddress(Name, ExportedSymbolsOnly);
}
/// @brief Get the address of the given symbol in the context of the set of
/// modules represented by the handle H.
uint64_t lookupSymbolAddressIn(ModuleSetHandleT H, const std::string &Name,
bool ExportedSymbolsOnly) {
BaseLayerModuleSetHandleListT &BaseLayerHandles = H->BaseLayerModuleSetHandles;
for (auto &BH : BaseLayerHandles) {
if (uint64_t Addr =
BaseLayer.lookupSymbolAddressIn(BH, Name, ExportedSymbolsOnly))
return Addr;
}
return 0;
}
private:
BaseLayerT &BaseLayer;
InsertCallbackAsmFtor InsertCallbackAsm;
ModuleSetInfoListT ModuleSetInfos;
};
}
#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
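A sketch of the lazy stack this layer is designed to sit on top of, per the comments above (TM is an assumed TargetMachine*, M an assumed unique_ptr<Module>, MangledName an assumed mangled symbol name; insertX86CallbackAsm is the target-specific inserter declared elsewhere in this patch):

// Sketch only: with this stack, each function is compiled on first call.
ObjectLinkingLayer<> ObjLayer;
IRCompileLayer<ObjectLinkingLayer<>> CompileLayer(ObjLayer, SimpleCompiler(*TM));
LazyEmittingLayer<IRCompileLayer<ObjectLinkingLayer<>>> LazyLayer(CompileLayer);
CompileOnDemandLayer<decltype(LazyLayer)> CODLayer(LazyLayer,
                                                   insertX86CallbackAsm);

std::vector<std::unique_ptr<Module>> Ms;
Ms.push_back(std::move(M));
CODLayer.addModuleSet(std::move(Ms), nullptr); // COD builds its own lookaside MMs.
uint64_t Addr = CODLayer.getSymbolAddress(MangledName, false);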

View File

@ -0,0 +1,59 @@
//===-- CompileUtils.h - Utilities for compiling IR in the JIT --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains utilities for compiling IR to object files.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
#include "llvm/PassManager.h"
#include "llvm/ExecutionEngine/ObjectMemoryBuffer.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
/// @brief Simple compile functor: Takes a single IR module and returns an
/// ObjectFile.
class SimpleCompiler {
public:
/// @brief Construct a simple compile functor with the given target.
SimpleCompiler(TargetMachine &TM) : TM(TM) {}
/// @brief Compile a Module to an ObjectFile.
object::OwningBinary<object::ObjectFile> operator()(Module &M) const {
SmallVector<char, 0> ObjBufferSV;
raw_svector_ostream ObjStream(ObjBufferSV);
PassManager PM;
MCContext *Ctx;
if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
llvm_unreachable("Target does not support MC emission.");
PM.run(M);
ObjStream.flush();
std::unique_ptr<MemoryBuffer> ObjBuffer(
new ObjectMemoryBuffer(std::move(ObjBufferSV)));
ErrorOr<std::unique_ptr<object::ObjectFile>> Obj =
object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
// TODO: Actually report errors helpfully.
typedef object::OwningBinary<object::ObjectFile> OwningObj;
if (Obj)
return OwningObj(std::move(*Obj), std::move(ObjBuffer));
return OwningObj(nullptr, nullptr);
}
private:
TargetMachine &TM;
};
}
#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
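A short sketch of using the functor on its own (M is an assumed Module; the takeBinary() pair-splitting mirrors its use in IRCompileLayer later in this patch):

// Sketch only: compile one Module to an in-memory object file.
std::unique_ptr<TargetMachine> TM(EngineBuilder().selectTarget());
SimpleCompiler Compile(*TM);
std::unique_ptr<object::ObjectFile> Obj;
std::unique_ptr<MemoryBuffer> ObjBuffer;
std::tie(Obj, ObjBuffer) = Compile(*M).takeBinary();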

View File

@ -0,0 +1,120 @@
//===------ IRCompileLayer.h -- Eagerly compile IR for JIT ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains the definition for a basic, eagerly compiling layer of the JIT.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
#define LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
#include "llvm/ExecutionEngine/ObjectCache.h"
#include <memory>
namespace llvm {
/// @brief Eager IR compiling layer.
///
/// This layer accepts sets of LLVM IR Modules (via addModuleSet). It
/// immediately compiles each IR module to an object file (each IR Module is
/// compiled separately). The resulting set of object files is then added to
/// the layer below, which must implement the object layer concept.
template <typename BaseLayerT> class IRCompileLayer {
public:
typedef std::function<object::OwningBinary<object::ObjectFile>(Module &)>
CompileFtor;
private:
typedef typename BaseLayerT::ObjSetHandleT ObjSetHandleT;
typedef std::vector<std::unique_ptr<object::ObjectFile>> OwningObjectVec;
typedef std::vector<std::unique_ptr<MemoryBuffer>> OwningBufferVec;
public:
/// @brief Handle to a set of compiled modules.
typedef ObjSetHandleT ModuleSetHandleT;
/// @brief Construct an IRCompileLayer with the given BaseLayer, which must
/// implement the ObjectLayer concept.
IRCompileLayer(BaseLayerT &BaseLayer, CompileFtor Compile)
: BaseLayer(BaseLayer), Compile(std::move(Compile)), ObjCache(nullptr) {}
/// @brief Set an ObjectCache to query before compiling.
void setObjectCache(ObjectCache *NewCache) { ObjCache = NewCache; }
/// @brief Compile each module in the given module set, then add the resulting
/// set of objects to the base layer, along with the memory manager MM.
///
/// @return A handle for the added modules.
template <typename ModuleSetT>
ModuleSetHandleT addModuleSet(ModuleSetT Ms,
std::unique_ptr<RTDyldMemoryManager> MM) {
OwningObjectVec Objects;
OwningBufferVec Buffers;
for (const auto &M : Ms) {
std::unique_ptr<object::ObjectFile> Object;
std::unique_ptr<MemoryBuffer> Buffer;
if (ObjCache)
std::tie(Object, Buffer) = tryToLoadFromObjectCache(*M).takeBinary();
if (!Object) {
std::tie(Object, Buffer) = Compile(*M).takeBinary();
if (ObjCache)
ObjCache->notifyObjectCompiled(&*M, Buffer->getMemBufferRef());
}
Objects.push_back(std::move(Object));
Buffers.push_back(std::move(Buffer));
}
return BaseLayer.addObjectSet(std::move(Objects), std::move(MM));
}
/// @brief Remove the module set associated with the handle H.
void removeModuleSet(ModuleSetHandleT H) { BaseLayer.removeObjectSet(H); }
/// @brief Get the address of a loaded symbol. This call is forwarded to the
/// base layer's getSymbolAddress implementation.
uint64_t getSymbolAddress(const std::string &Name, bool ExportedSymbolsOnly) {
return BaseLayer.getSymbolAddress(Name, ExportedSymbolsOnly);
}
/// @brief Get the address of the given symbol in the context of the set of
/// compiled modules represented by the handle H. This call is
/// forwarded to the base layer's implementation.
uint64_t lookupSymbolAddressIn(ModuleSetHandleT H, const std::string &Name,
bool ExportedSymbolsOnly) {
return BaseLayer.lookupSymbolAddressIn(H, Name, ExportedSymbolsOnly);
}
private:
object::OwningBinary<object::ObjectFile>
tryToLoadFromObjectCache(const Module &M) {
std::unique_ptr<MemoryBuffer> ObjBuffer = ObjCache->getObject(&M);
if (!ObjBuffer)
return {nullptr, nullptr};
ErrorOr<std::unique_ptr<object::ObjectFile>> Obj =
object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
if (!Obj)
return {nullptr, nullptr};
return {std::move(*Obj), std::move(ObjBuffer)};
}
BaseLayerT &BaseLayer;
CompileFtor Compile;
ObjectCache *ObjCache;
};
}
#endif // LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
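A sketch of the minimal eager stack built from this layer and the object-linking layer defined later in this patch (TM, M and MangledName are assumptions):

// Sketch only: eagerly compile IR modules and link the resulting objects.
ObjectLinkingLayer<> ObjLayer;
IRCompileLayer<ObjectLinkingLayer<>> CompileLayer(ObjLayer, SimpleCompiler(*TM));

std::vector<std::unique_ptr<Module>> Ms;
Ms.push_back(std::move(M));
auto H = CompileLayer.addModuleSet(std::move(Ms),
                                   llvm::make_unique<SectionMemoryManager>());
uint64_t Addr = CompileLayer.getSymbolAddress(MangledName, true);
CompileLayer.removeModuleSet(H); // Frees the compiled objects when done.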

View File

@ -0,0 +1,284 @@
//===-- IndirectionUtils.h - Utilities for adding indirections --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains utilities for adding indirections and breaking up modules.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include <sstream>
namespace llvm {
/// @brief Persistent name mangling.
///
/// This class provides name mangling that can outlive a Module (and its
/// DataLayout).
class PersistentMangler {
public:
PersistentMangler(DataLayout DL) : DL(std::move(DL)), M(&this->DL) {}
std::string getMangledName(StringRef Name) const {
std::string MangledName;
{
raw_string_ostream MangledNameStream(MangledName);
M.getNameWithPrefix(MangledNameStream, Name);
}
return MangledName;
}
private:
DataLayout DL;
Mangler M;
};
/// @brief Handle callbacks from the JIT process requesting the definitions of
/// symbols.
///
/// This utility is intended to be used to support compile-on-demand for
/// functions.
class JITResolveCallbackHandler {
private:
typedef std::vector<std::string> FuncNameList;
public:
typedef FuncNameList::size_type StubIndex;
public:
/// @brief Create a JITResolveCallbackHandler with the given functors for
/// looking up symbols and updating their use-sites.
///
/// @return A JITResolveCallbackHandler instance that will invoke the
/// Lookup and Update functors as needed to resolve missing symbol
/// definitions.
template <typename LookupFtor, typename UpdateFtor>
static std::unique_ptr<JITResolveCallbackHandler> create(LookupFtor Lookup,
UpdateFtor Update);
/// @brief Destroy instance. Does not modify existing emitted symbols.
///
/// Not-yet-emitted symbols will need to be resolved some other way after
/// this class is destroyed.
virtual ~JITResolveCallbackHandler() {}
/// @brief Add a function to be resolved on demand.
void addFuncName(std::string Name) { FuncNames.push_back(std::move(Name)); }
/// @brief Get the name associated with the given index.
const std::string &getFuncName(StubIndex Idx) const { return FuncNames[Idx]; }
/// @brief Returns the number of symbols being managed by this instance.
StubIndex getNumFuncs() const { return FuncNames.size(); }
/// @brief Get the address for the symbol associated with the given index.
///
/// This is expected to be called by code in the JIT process itself, in
/// order to resolve a function.
virtual uint64_t resolve(StubIndex StubIdx) = 0;
private:
FuncNameList FuncNames;
};
// Implementation class for JITResolveCallbackHandler.
template <typename LookupFtor, typename UpdateFtor>
class JITResolveCallbackHandlerImpl : public JITResolveCallbackHandler {
public:
JITResolveCallbackHandlerImpl(LookupFtor Lookup, UpdateFtor Update)
: Lookup(std::move(Lookup)), Update(std::move(Update)) {}
uint64_t resolve(StubIndex StubIdx) override {
const std::string &FuncName = getFuncName(StubIdx);
uint64_t Addr = Lookup(FuncName);
Update(FuncName, Addr);
return Addr;
}
private:
LookupFtor Lookup;
UpdateFtor Update;
};
template <typename LookupFtor, typename UpdateFtor>
std::unique_ptr<JITResolveCallbackHandler>
JITResolveCallbackHandler::create(LookupFtor Lookup, UpdateFtor Update) {
typedef JITResolveCallbackHandlerImpl<LookupFtor, UpdateFtor> Impl;
return make_unique<Impl>(std::move(Lookup), std::move(Update));
}
/// @brief Holds a list of the function names that were indirected, plus
/// mappings from each of these names to (a) the name of the function
/// providing the implementation for that name (GetImplName), and (b) the
/// name of the global variable holding the address of the implementation
/// (GetAddrName).
///
/// This data structure can be used with a JITCallbackHandler to look up and
/// update function implementations when lazily compiling.
class JITIndirections {
public:
JITIndirections(std::vector<std::string> IndirectedNames,
std::function<std::string(StringRef)> GetImplName,
std::function<std::string(StringRef)> GetAddrName)
: IndirectedNames(std::move(IndirectedNames)),
GetImplName(std::move(GetImplName)),
GetAddrName(std::move(GetAddrName)) {}
std::vector<std::string> IndirectedNames;
std::function<std::string(StringRef Name)> GetImplName;
std::function<std::string(StringRef Name)> GetAddrName;
};
/// @brief Indirect all calls to functions matching the predicate
/// ShouldIndirect through a global variable containing the address
/// of the implementation.
///
/// @return An indirection structure containing the functions that had their
/// call-sites re-written.
///
/// For each function 'F' that meets the ShouldIndirect predicate, and that
/// is called in this Module, add a common-linkage global variable to the
/// module that will hold the address of the implementation of that function.
/// Rewrite all call-sites of 'F' to be indirect calls (via the global).
/// This allows clients, either directly or via a JITCallbackHandler, to
/// change the address of the implementation of 'F' at runtime.
///
/// Important notes:
///
/// Single indirection does not preserve pointer equality for 'F'. If the
/// program was already calling 'F' indirectly through function pointers, or
/// if it was taking the address of 'F' for the purpose of pointer comparisons
/// or arithmetic, double indirection should be used instead.
///
/// This method does *not* initialize the function implementation addresses.
/// The client must do this prior to running any call-sites that have been
/// indirected.
JITIndirections makeCallsSingleIndirect(
llvm::Module &M,
const std::function<bool(const Function &)> &ShouldIndirect,
const char *JITImplSuffix, const char *JITAddrSuffix);
/// @brief Replace the body of functions matching the predicate ShouldIndirect
/// with indirect calls to the implementation.
///
/// @return An indirections structure containing the functions that had their
/// implementations re-written.
///
/// For each function 'F' that meets the ShouldIndirect predicate, add a
/// common-linkage global variable to the module that will hold the address of
/// the implementation of that function and rewrite the implementation of 'F'
/// to call through to the implementation indirectly (via the global).
/// This allows clients, either directly or via a JITCallbackHandler, to
/// change the address of the implementation of 'F' at runtime.
///
/// Important notes:
///
/// Double indirection is slower than single indirection, but preserves
/// pointer equality tests and gives correct behavior for function pointers:
/// all calls to 'F', direct or indirect, go to the address stored in the
/// global variable at the time of the call.
///
/// This method does *not* initialize the function implementation addresses.
/// The client must do this prior to running any call-sites that have been
/// indirected.
JITIndirections makeCallsDoubleIndirect(
llvm::Module &M,
const std::function<bool(const Function &)> &ShouldIndirect,
const char *JITImplSuffix, const char *JITAddrSuffix);
/// @brief Given a set of indirections and a symbol lookup functor, create a
/// JITResolveCallbackHandler instance that will resolve the
/// implementations for the indirected symbols on demand.
template <typename SymbolLookupFtor>
std::unique_ptr<JITResolveCallbackHandler>
createCallbackHandlerFromJITIndirections(const JITIndirections &Indirs,
const PersistentMangler &NM,
SymbolLookupFtor Lookup) {
auto GetImplName = Indirs.GetImplName;
auto GetAddrName = Indirs.GetAddrName;
std::unique_ptr<JITResolveCallbackHandler> J =
JITResolveCallbackHandler::create(
[=](const std::string &S) {
return Lookup(NM.getMangledName(GetImplName(S)));
},
[=](const std::string &S, uint64_t Addr) {
void *ImplPtr = reinterpret_cast<void *>(
Lookup(NM.getMangledName(GetAddrName(S))));
memcpy(ImplPtr, &Addr, sizeof(uint64_t));
});
for (const auto &FuncName : Indirs.IndirectedNames)
J->addFuncName(FuncName);
return J;
}
/// @brief Insert callback asm into module M for the symbols managed by
/// JITResolveCallbackHandler J.
void insertX86CallbackAsm(Module &M, JITResolveCallbackHandler &J);
/// @brief Initialize global indirects to point into the callback asm.
template <typename LookupFtor>
void initializeFuncAddrs(JITResolveCallbackHandler &J,
const JITIndirections &Indirs,
const PersistentMangler &NM, LookupFtor Lookup) {
// Forward declare so that we can access this, even though it's an
// implementation detail.
std::string getJITResolveCallbackIndexLabel(unsigned I);
if (J.getNumFuncs() == 0)
return;
// Force a lookup of one of the global addresses for a function that has been
// indirected. We need to do this to trigger the emission of the module
// holding the callback asm. We can't rely on that emission happening
// automatically when we look up the callback asm symbols, since lazy-emitting
// layers can't see those.
Lookup(NM.getMangledName(Indirs.GetAddrName(J.getFuncName(0))));
// Now update indirects to point to the JIT resolve callback asm.
for (JITResolveCallbackHandler::StubIndex I = 0; I < J.getNumFuncs(); ++I) {
uint64_t ResolveCallbackIdxAddr =
Lookup(getJITResolveCallbackIndexLabel(I));
void *AddrPtr = reinterpret_cast<void *>(
Lookup(NM.getMangledName(Indirs.GetAddrName(J.getFuncName(I)))));
assert(AddrPtr && "Can't find stub addr global to initialize.");
memcpy(AddrPtr, &ResolveCallbackIdxAddr, sizeof(uint64_t));
}
}
/// @brief Extract all functions matching the predicate ShouldExtract into
/// their own modules. (Does not modify the original module.)
///
/// @return A set of modules, the first containing all symbols (including
/// globals and aliases) that did not pass ShouldExtract, and each
/// subsequent module containing one of the functions that did meet
/// ShouldExtract.
///
/// By adding the resulting modules separately (not as a set) to a
/// LazyEmittingLayer instance, compilation can be deferred until symbols are
/// actually needed.
std::vector<std::unique_ptr<llvm::Module>>
explode(const llvm::Module &OrigMod,
const std::function<bool(const Function &)> &ShouldExtract);
/// @brief Given a module that has been indirectified, break each function
/// that has been indirected out into its own module. (Does not modify
/// the original module).
///
/// @returns A set of modules covering the symbols provided by OrigMod.
std::vector<std::unique_ptr<llvm::Module>>
explode(const llvm::Module &OrigMod, const JITIndirections &Indirections);
}
#endif // LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
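A condensed sketch of the indirect-then-explode flow these utilities support, mirroring CompileOnDemandLayer::addModuleSet above (M is an assumed Module; the suffix strings match the ones used there):

// Sketch only: make every call in M double-indirect, then split each
// indirected function out into its own Module.
JITIndirections Indirections =
    makeCallsDoubleIndirect(*M, [](const Function &) { return true; },
                            "$orc_impl", "$orc_addr");
std::vector<std::unique_ptr<Module>> Parts = explode(*M, Indirections);
// The resulting modules can then be added to lower layers individually, and
// their implementation addresses initialized via initializeFuncAddrs once a
// JITResolveCallbackHandler has been created for these indirections.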

View File

@ -0,0 +1,256 @@
//===- LazyEmittingLayer.h - Lazily emit IR to lower JIT layers -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains the definition for a lazy-emitting layer for the JIT.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
#include "LookasideRTDyldMM.h"
#include "llvm/IR/Mangler.h"
#include <list>
namespace llvm {
/// @brief Lazy-emitting IR layer.
///
/// This layer accepts sets of LLVM IR Modules (via addModuleSet), but does
/// not immediately emit them to the layer below. Instead, emission to the base
/// layer is deferred until some symbol in the module set is requested via
/// getSymbolAddress.
template <typename BaseLayerT> class LazyEmittingLayer {
public:
typedef typename BaseLayerT::ModuleSetHandleT BaseLayerHandleT;
private:
class EmissionDeferredSet {
public:
EmissionDeferredSet() : EmitState(NotEmitted) {}
virtual ~EmissionDeferredSet() {}
uint64_t Search(StringRef Name, bool ExportedSymbolsOnly, BaseLayerT &B) {
switch (EmitState) {
case NotEmitted:
if (Provides(Name, ExportedSymbolsOnly)) {
EmitState = Emitting;
Handle = Emit(B);
EmitState = Emitted;
} else
return 0;
break;
case Emitting:
// The module has been added to the base layer but we haven't gotten a
// handle back yet so we can't use lookupSymbolAddressIn. Just return
// '0' here - LazyEmittingLayer::getSymbolAddress will do a global
// search in the base layer when it doesn't find the symbol here, so
// we'll find it in the end.
return 0;
case Emitted:
// Nothing to do. Go ahead and search the base layer.
break;
}
return B.lookupSymbolAddressIn(Handle, Name, ExportedSymbolsOnly);
}
void RemoveModulesFromBaseLayer(BaseLayerT &BaseLayer) {
if (EmitState != NotEmitted)
BaseLayer.removeModuleSet(Handle);
}
template <typename ModuleSetT>
static std::unique_ptr<EmissionDeferredSet>
create(BaseLayerT &B, ModuleSetT Ms,
std::unique_ptr<RTDyldMemoryManager> MM);
protected:
virtual bool Provides(StringRef Name, bool ExportedSymbolsOnly) const = 0;
virtual BaseLayerHandleT Emit(BaseLayerT &BaseLayer) = 0;
private:
enum { NotEmitted, Emitting, Emitted } EmitState;
BaseLayerHandleT Handle;
};
template <typename ModuleSetT>
class EmissionDeferredSetImpl : public EmissionDeferredSet {
public:
EmissionDeferredSetImpl(ModuleSetT Ms,
std::unique_ptr<RTDyldMemoryManager> MM)
: Ms(std::move(Ms)), MM(std::move(MM)) {}
protected:
BaseLayerHandleT Emit(BaseLayerT &BaseLayer) override {
// We don't need the mangled names set any more: Once we've emitted this
// to the base layer we'll just look for symbols there.
MangledNames.reset();
return BaseLayer.addModuleSet(std::move(Ms), std::move(MM));
}
bool Provides(StringRef Name, bool ExportedSymbolsOnly) const override {
// FIXME: We could clean all this up if we had a way to reliably demangle
// names: We could just demangle name and search, rather than
// mangling everything else.
// If we have already built the mangled name set then just search it.
if (MangledNames) {
auto VI = MangledNames->find(Name);
if (VI == MangledNames->end())
return false;
return !ExportedSymbolsOnly || VI->second;
}
// If we haven't built the mangled name set yet, try to build it. As an
// optimization this will leave MangledNames set to nullptr if we find
// Name in the process of building the set.
buildMangledNames(Name, ExportedSymbolsOnly);
if (!MangledNames)
return true;
return false;
}
private:
// If the mangled name of the given GlobalValue matches the given search
// name (and its visibility conforms to the ExportedSymbolsOnly flag) then
// just return 'true'. Otherwise, add the mangled name to the Names map and
// return 'false'.
bool addGlobalValue(StringMap<bool> &Names, const GlobalValue &GV,
const Mangler &Mang, StringRef SearchName,
bool ExportedSymbolsOnly) const {
// Modules don't "provide" decls or common symbols.
if (GV.isDeclaration() || GV.hasCommonLinkage())
return false;
// Mangle the GV name.
std::string MangledName;
{
raw_string_ostream MangledNameStream(MangledName);
Mang.getNameWithPrefix(MangledNameStream, &GV, false);
}
// Check whether this is the name we were searching for, and if it is then
// bail out early.
if (MangledName == SearchName)
if (!ExportedSymbolsOnly || GV.hasDefaultVisibility())
return true;
// Otherwise add this to the map for later.
Names[MangledName] = GV.hasDefaultVisibility();
return false;
}
// Build the MangledNames map. Bails out early (with MangledNames left set
// to nullptr) if the given SearchName is found while building the map.
void buildMangledNames(StringRef SearchName,
bool ExportedSymbolsOnly) const {
assert(!MangledNames && "Mangled names map already exists?");
auto Names = llvm::make_unique<StringMap<bool>>();
for (const auto &M : Ms) {
Mangler Mang(M->getDataLayout());
for (const auto &GV : M->globals())
if (addGlobalValue(*Names, GV, Mang, SearchName, ExportedSymbolsOnly))
return;
for (const auto &F : *M)
if (addGlobalValue(*Names, F, Mang, SearchName, ExportedSymbolsOnly))
return;
}
MangledNames = std::move(Names);
}
ModuleSetT Ms;
std::unique_ptr<RTDyldMemoryManager> MM;
mutable std::unique_ptr<StringMap<bool>> MangledNames;
};
typedef std::list<std::unique_ptr<EmissionDeferredSet>> ModuleSetListT;
BaseLayerT &BaseLayer;
ModuleSetListT ModuleSetList;
public:
/// @brief Handle to a set of loaded modules.
typedef typename ModuleSetListT::iterator ModuleSetHandleT;
/// @brief Construct a lazy emitting layer.
LazyEmittingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
/// @brief Add the given set of modules to the lazy emitting layer.
///
/// This method stores the set of modules in a side table, rather than
/// immediately emitting them to the next layer of the JIT. When the address
/// of a symbol provided by this set is requested (via getSymbolAddress) it
/// triggers the emission of this set to the layer below (along with the given
/// memory manager instance), and returns the address of the requested symbol.
template <typename ModuleSetT>
ModuleSetHandleT addModuleSet(ModuleSetT Ms,
std::unique_ptr<RTDyldMemoryManager> MM) {
return ModuleSetList.insert(
ModuleSetList.end(),
EmissionDeferredSet::create(BaseLayer, std::move(Ms), std::move(MM)));
}
/// @brief Remove the module set represented by the given handle.
///
/// This method will free the memory associated with the given module set,
/// both in this layer, and the base layer.
void removeModuleSet(ModuleSetHandleT H) {
(*H)->RemoveModulesFromBaseLayer(BaseLayer);
ModuleSetList.erase(H);
}
/// @brief Get the address of a symbol provided by this layer, or some layer
/// below this one.
///
/// When called for a symbol that has been added to this layer (via
/// addModuleSet) but not yet emitted, this will trigger the emission of the
/// module set containing the definition of the symbol.
uint64_t getSymbolAddress(const std::string &Name, bool ExportedSymbolsOnly) {
// Look up symbol among existing definitions.
if (uint64_t Addr = BaseLayer.getSymbolAddress(Name, ExportedSymbolsOnly))
return Addr;
// If not found then search the deferred sets. The call to 'Search' will
// cause the set to be emitted to the next layer if it provides a definition
// of 'Name'.
for (auto &DeferredSet : ModuleSetList)
if (uint64_t Addr =
DeferredSet->Search(Name, ExportedSymbolsOnly, BaseLayer))
return Addr;
// If no definition found anywhere return 0.
return 0;
}
/// @brief Get the address of the given symbol in the context of the set of
/// compiled modules represented by the handle H. This call is
/// forwarded to the base layer's implementation.
uint64_t lookupSymbolAddressIn(ModuleSetHandleT H, const std::string &Name,
bool ExportedSymbolsOnly) {
return (*H)->Search(Name, ExportedSymbolsOnly, BaseLayer);
}
};
template <typename BaseLayerT>
template <typename ModuleSetT>
std::unique_ptr<typename LazyEmittingLayer<BaseLayerT>::EmissionDeferredSet>
LazyEmittingLayer<BaseLayerT>::EmissionDeferredSet::create(
BaseLayerT &B, ModuleSetT Ms, std::unique_ptr<RTDyldMemoryManager> MM) {
return llvm::make_unique<EmissionDeferredSetImpl<ModuleSetT>>(std::move(Ms),
std::move(MM));
}
}
#endif // LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
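A brief sketch of deferred emission, reusing the CompileLayer from the sketch after IRCompileLayer (M and MangledName are assumptions):

// Sketch only: nothing is emitted until a symbol from the set is requested.
LazyEmittingLayer<IRCompileLayer<ObjectLinkingLayer<>>> LazyLayer(CompileLayer);

std::vector<std::unique_ptr<Module>> Ms;
Ms.push_back(std::move(M));
LazyLayer.addModuleSet(std::move(Ms), llvm::make_unique<SectionMemoryManager>());
// This lookup triggers emission of the set providing MangledName:
uint64_t Addr = LazyLayer.getSymbolAddress(MangledName, false);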

View File

@ -0,0 +1,88 @@
//===- LookasideRTDyldMM - Redirect symbol lookup via a functor -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Defines an adapter for RuntimeDyldMM that allows lookups for external
// symbols to go via a functor.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_LOOKASIDERTDYLDMM_H
#define LLVM_EXECUTIONENGINE_ORC_LOOKASIDERTDYLDMM_H
#include <memory>
#include <vector>
namespace llvm {
/// @brief Defines an adapter for RuntimeDyldMM that allows lookups for external
/// symbols to go via a functor, before falling back to the lookup logic
/// provided by the underlying RuntimeDyldMM instance.
///
/// This class is useful for redirecting symbol lookup back to various layers
/// of a JIT component stack, e.g. to enable lazy module emission.
///
template <typename BaseRTDyldMM, typename ExternalLookupFtor,
typename DylibLookupFtor>
class LookasideRTDyldMM : public BaseRTDyldMM {
public:
/// @brief Create a LookasideRTDyldMM instance.
LookasideRTDyldMM(ExternalLookupFtor ExternalLookup,
DylibLookupFtor DylibLookup)
: ExternalLookup(std::move(ExternalLookup)),
DylibLookup(std::move(DylibLookup)) {}
/// @brief Look up the given symbol address, first via the functor this
/// instance was created with, then (if the symbol isn't found)
/// via the underlying RuntimeDyldMM.
uint64_t getSymbolAddress(const std::string &Name) override {
if (uint64_t Addr = ExternalLookup(Name))
return Addr;
return BaseRTDyldMM::getSymbolAddress(Name);
}
uint64_t getSymbolAddressInLogicalDylib(const std::string &Name) override {
if (uint64_t Addr = DylibLookup(Name))
return Addr;
return BaseRTDyldMM::getSymbolAddressInLogicalDylib(Name);
}
/// @brief Get a reference to the ExternalLookup functor.
ExternalLookupFtor &getExternalLookup() { return ExternalLookup; }
/// @brief Get a const-reference to the ExternalLookup functor.
const ExternalLookupFtor &getExternalLookup() const { return ExternalLookup; }
/// @brief Get a reference to the DylibLookup functor.
DylibLookupFtor &getDylibLookup() { return DylibLookup; }
/// @brief Get a const-reference to the DylibLookup functor.
const DylibLookupFtor &getDylibLookup() const { return DylibLookup; }
private:
ExternalLookupFtor ExternalLookup;
DylibLookupFtor DylibLookup;
};
/// @brief Create a LookasideRTDyldMM from a base memory manager type, an
/// external lookup functor, and a dylib lookup functor.
template <typename BaseRTDyldMM, typename ExternalLookupFtor,
typename DylibLookupFtor>
std::unique_ptr<
LookasideRTDyldMM<BaseRTDyldMM, ExternalLookupFtor, DylibLookupFtor>>
createLookasideRTDyldMM(ExternalLookupFtor &&ExternalLookup,
DylibLookupFtor &&DylibLookup) {
typedef LookasideRTDyldMM<BaseRTDyldMM, ExternalLookupFtor, DylibLookupFtor>
ThisLookasideMM;
return llvm::make_unique<ThisLookasideMM>(
std::forward<ExternalLookupFtor>(ExternalLookup),
std::forward<DylibLookupFtor>(DylibLookup));
}
}
#endif // LLVM_EXECUTIONENGINE_ORC_LOOKASIDERTDYLDMM_H
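A small sketch of the factory function (Symbols is an assumed StringMap<uint64_t> acting as a JIT-local symbol table; the shape mirrors the use in CompileOnDemandLayer above):

// Sketch only: external lookups consult a local symbol table first, then fall
// back to SectionMemoryManager's usual resolution.
auto MM = createLookasideRTDyldMM<SectionMemoryManager>(
    [&Symbols](const std::string &Name) -> uint64_t {
      auto I = Symbols.find(Name);
      return I == Symbols.end() ? 0 : I->second;
    },
    [](const std::string &) -> uint64_t { return 0; });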

View File

@ -0,0 +1,254 @@
//===- ObjectLinkingLayer.h - Add object files to a JIT process -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains the definition for the object layer of the JIT.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
#include "LookasideRTDyldMM.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include <list>
#include <memory>
namespace llvm {
class ObjectLinkingLayerBase {
protected:
/// @brief Holds a set of objects to be allocated/linked as a unit in the JIT.
///
/// An instance of this class will be created for each set of objects added
/// via ObjectLinkingLayer::addObjectSet. Deleting the instance (via
/// removeObjectSet) frees its memory, removing all symbol definitions that
/// had been provided by this instance. Higher level layers are responsible
/// for taking any action required to handle the missing symbols.
class LinkedObjectSet {
public:
LinkedObjectSet(std::unique_ptr<RTDyldMemoryManager> MM)
: MM(std::move(MM)), RTDyld(llvm::make_unique<RuntimeDyld>(&*this->MM)),
State(Raw) {}
std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
addObject(const object::ObjectFile &Obj) {
return RTDyld->loadObject(Obj);
}
uint64_t getSymbolAddress(StringRef Name, bool ExportedSymbolsOnly) {
if (ExportedSymbolsOnly)
return RTDyld->getExportedSymbolLoadAddress(Name);
return RTDyld->getSymbolLoadAddress(Name);
}
bool NeedsFinalization() const { return (State == Raw); }
void Finalize() {
State = Finalizing;
RTDyld->resolveRelocations();
RTDyld->registerEHFrames();
MM->finalizeMemory();
State = Finalized;
}
void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress) {
assert((State != Finalized) &&
"Attempting to remap sections for finalized objects.");
RTDyld->mapSectionAddress(LocalAddress, TargetAddress);
}
private:
std::unique_ptr<RTDyldMemoryManager> MM;
std::unique_ptr<RuntimeDyld> RTDyld;
enum { Raw, Finalizing, Finalized } State;
};
typedef std::list<LinkedObjectSet> LinkedObjectSetListT;
public:
/// @brief Handle to a set of loaded objects.
typedef typename LinkedObjectSetListT::iterator ObjSetHandleT;
};
/// @brief Default (no-op) action to perform when loading objects.
class DoNothingOnNotifyLoaded {
public:
template <typename ObjSetT, typename LoadResult>
void operator()(ObjectLinkingLayerBase::ObjSetHandleT, const ObjSetT &,
const LoadResult &) {}
};
/// @brief Bare bones object linking layer.
///
/// This class is intended to be used as the base layer for a JIT. It allows
/// object files to be loaded into memory, linked, and the addresses of their
/// symbols queried. All objects added to this layer can see each other's
/// symbols.
template <typename NotifyLoadedFtor = DoNothingOnNotifyLoaded>
class ObjectLinkingLayer : public ObjectLinkingLayerBase {
public:
/// @brief LoadedObjectInfo list. Contains a list of owning pointers to
/// RuntimeDyld::LoadedObjectInfo instances.
typedef std::vector<std::unique_ptr<RuntimeDyld::LoadedObjectInfo>>
LoadedObjInfoList;
/// @brief Default construct an ObjectLinkingLayer.
ObjectLinkingLayer() {}
/// @brief Construct an ObjectLinkingLayer with the given NotifyLoaded
/// functor.
ObjectLinkingLayer(NotifyLoadedFtor NotifyLoaded)
: NotifyLoaded(std::move(NotifyLoaded)) {}
/// @brief Construct an ObjectLinkingLayer with the given NotifyFinalized
/// functor.
ObjectLinkingLayer(std::function<void(ObjSetHandleT)> NotifyFinalized)
: NotifyFinalized(std::move(NotifyFinalized)) {}
/// @brief Construct an ObjectLinkingLayer with the given CreateMemoryManager
/// functor.
ObjectLinkingLayer(
std::function<std::unique_ptr<RTDyldMemoryManager>()> CreateMemoryManager)
: CreateMemoryManager(std::move(CreateMemoryManager)) {}
/// @brief Construct an ObjectLinkingLayer with the given NotifyLoaded and
/// NotifyFinalized functors.
ObjectLinkingLayer(NotifyLoadedFtor NotifyLoaded,
std::function<void(ObjSetHandleT)> NotifyFinalized)
: NotifyLoaded(std::move(NotifyLoaded)),
NotifyFinalized(std::move(NotifyFinalized)) {}
/// @brief Construct an ObjectLinkingLayer with the given NotifyLoaded and
/// CreateMemoryManager functors.
ObjectLinkingLayer(
NotifyLoadedFtor NotifyLoaded,
std::function<std::unique_ptr<RTDyldMemoryManager>()> CreateMemoryManager)
: NotifyLoaded(std::move(NotifyLoaded)),
CreateMemoryManager(std::move(CreateMemoryManager)) {}
/// @brief Construct an ObjectLinkingLayer with the given NotifyFinalized and
/// CreateMemoryManager functors.
ObjectLinkingLayer(
std::function<void(ObjSetHandleT)> NotifyFinalized,
std::function<std::unique_ptr<RTDyldMemoryManager>()> CreateMemoryManager)
: NotifyFinalized(std::move(NotifyFinalized)),
CreateMemoryManager(std::move(CreateMemoryManager)) {}
/// @brief Construct an ObjectLinkingLayer with the given NotifyLoaded,
/// NotifyFinalized and CreateMemoryManager functors.
ObjectLinkingLayer(
NotifyLoadedFtor NotifyLoaded,
std::function<void(ObjSetHandleT)> NotifyFinalized,
std::function<std::unique_ptr<RTDyldMemoryManager>()> CreateMemoryManager)
: NotifyLoaded(std::move(NotifyLoaded)),
NotifyFinalized(std::move(NotifyFinalized)),
CreateMemoryManager(std::move(CreateMemoryManager)) {}
/// @brief Add a set of objects (or archives) that will be treated as a unit
/// for the purposes of symbol lookup and memory management.
///
/// @return A handle that can be used to refer to the loaded objects (and,
/// eventually, to free the memory allocated for them). A LoadedObjInfoList
/// containing one LoadedObjInfo instance for each object in the Objects
/// list is passed to the NotifyLoaded functor rather than returned.
///
/// This version of this method allows the client to pass in an
/// RTDyldMemoryManager instance that will be used to allocate memory and look
/// up external symbol addresses for the given objects.
template <typename ObjSetT>
ObjSetHandleT addObjectSet(const ObjSetT &Objects,
std::unique_ptr<RTDyldMemoryManager> MM) {
if (!MM) {
assert(CreateMemoryManager &&
"No memory manager or memory manager creator provided.");
MM = CreateMemoryManager();
}
ObjSetHandleT Handle = LinkedObjSetList.insert(
LinkedObjSetList.end(), LinkedObjectSet(std::move(MM)));
LinkedObjectSet &LOS = *Handle;
LoadedObjInfoList LoadedObjInfos;
for (auto &Obj : Objects)
LoadedObjInfos.push_back(LOS.addObject(*Obj));
NotifyLoaded(Handle, Objects, LoadedObjInfos);
return Handle;
}
/// @brief Map section addresses for the objects associated with the handle H.
void mapSectionAddress(ObjSetHandleT H, const void *LocalAddress,
uint64_t TargetAddress) {
H->mapSectionAddress(LocalAddress, TargetAddress);
}
/// @brief Remove the set of objects associated with handle H.
///
/// All memory allocated for the objects will be freed, and the sections and
/// symbols they provided will no longer be available. No attempt is made to
/// re-emit the missing symbols, and any use of these symbols (directly or
/// indirectly) will result in undefined behavior. If dependence tracking is
/// required to detect or resolve such issues it should be added at a higher
/// layer.
void removeObjectSet(ObjSetHandleT H) {
// How do we invalidate the symbols in H?
LinkedObjSetList.erase(H);
}
/// @brief Get the address of a loaded symbol.
///
/// @return The address in the target process's address space of the named
/// symbol. Null if no such symbol is known.
///
/// This method will trigger the finalization of the linked object set
/// containing the definition of the given symbol, if it is found.
uint64_t getSymbolAddress(StringRef Name, bool ExportedSymbolsOnly) {
for (auto I = LinkedObjSetList.begin(), E = LinkedObjSetList.end(); I != E;
++I)
if (uint64_t Addr = lookupSymbolAddressIn(I, Name, ExportedSymbolsOnly))
return Addr;
return 0;
}
/// @brief Search for a given symbol in the context of the set of loaded
/// objects represented by the handle H.
///
/// @return The address in the target process's address space of the named
/// symbol. Null if the given object set does not contain a definition
/// of this symbol.
///
/// This method will trigger the finalization of the linked object set
/// represented by the handle H if that set contains the requested symbol.
uint64_t lookupSymbolAddressIn(ObjSetHandleT H, StringRef Name,
bool ExportedSymbolsOnly) {
if (uint64_t Addr = H->getSymbolAddress(Name, ExportedSymbolsOnly)) {
if (H->NeedsFinalization()) {
H->Finalize();
if (NotifyFinalized)
NotifyFinalized(H);
}
return Addr;
}
return 0;
}
private:
LinkedObjectSetListT LinkedObjSetList;
NotifyLoadedFtor NotifyLoaded;
std::function<void(ObjSetHandleT)> NotifyFinalized;
std::function<std::unique_ptr<RTDyldMemoryManager>()> CreateMemoryManager;
};
} // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
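A short usage sketch (Obj is an assumed unique_ptr<object::ObjectFile>, e.g. produced by SimpleCompiler earlier in this patch; MangledName is an assumption):

// Sketch only: add one pre-built object and query a symbol; the query triggers
// finalization of the containing set.
ObjectLinkingLayer<> ObjLayer;
std::vector<std::unique_ptr<object::ObjectFile>> Objs;
Objs.push_back(std::move(Obj));
auto H = ObjLayer.addObjectSet(Objs, llvm::make_unique<SectionMemoryManager>());
uint64_t Addr = ObjLayer.getSymbolAddress(MangledName, true);
ObjLayer.removeObjectSet(H); // Frees all memory allocated for the set.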

View File

@ -0,0 +1,26 @@
//===-- OrcTargetSupport.h - Code to support specific targets --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Target specific code for Orc, e.g. callback assembly.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_ORCTARGETSUPPORT_H
#define LLVM_EXECUTIONENGINE_ORC_ORCTARGETSUPPORT_H
#include "IndirectionUtils.h"
namespace llvm {
/// @brief Insert callback asm into module M for the symbols managed by
/// JITResolveCallbackHandler J.
void insertX86CallbackAsm(Module &M, JITResolveCallbackHandler &J);
}
#endif // LLVM_EXECUTIONENGINE_ORC_ORCTARGETSUPPORT_H

View File

@ -0,0 +1,38 @@
//===---- OrcMCJITReplacement.h - Orc-based MCJIT replacement ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file forces OrcMCJITReplacement to link in on certain operating
// systems (Windows).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORCMCJITREPLACEMENT_H
#define LLVM_EXECUTIONENGINE_ORCMCJITREPLACEMENT_H
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include <cstdlib>
extern "C" void LLVMLinkInOrcMCJITReplacement();
namespace {
struct ForceOrcMCJITReplacementLinking {
ForceOrcMCJITReplacementLinking() {
// We must reference OrcMCJITReplacement in such a way that compilers will
// not delete it all as dead code, even with whole program optimization,
// yet is effectively a NO-OP. As the compiler isn't smart enough to know
// that getenv() never returns -1, this will do the job.
if (std::getenv("bar") != (char*) -1)
return;
LLVMLinkInOrcMCJITReplacement();
}
} ForceOrcMCJITReplacementLinking;
}
#endif

View File

@ -89,6 +89,27 @@ public:
return getSymbolAddressInProcess(Name);
}
/// This method returns the address of the specified symbol if it exists
/// within the logical dynamic library represented by this
/// RTDyldMemoryManager. Unlike getSymbolAddress, queries through this
/// interface should return addresses for hidden symbols.
///
/// This is of particular importance for the Orc JIT APIs, which support lazy
/// compilation by breaking up modules: Each of those broken out modules
/// must be able to resolve hidden symbols provided by the others. Clients
/// writing memory managers for MCJIT can usually ignore this method.
///
/// This method will be queried by RuntimeDyld when checking for previous
/// definitions of common symbols. It will *not* be queried by default when
/// resolving external symbols (this minimises the link-time overhead for
/// MCJIT clients who don't care about Orc features). If you are writing an
/// RTDyldMemoryManager for Orc and want "external" symbol resolution to
/// search the logical dylib, you should override your getSymbolAddress
/// method to call this method directly.
virtual uint64_t getSymbolAddressInLogicalDylib(const std::string &Name) {
return 0;
}
/// This method returns the address of the specified function. As such it is
/// only useful for resolving library symbols, not code generated symbols.
///
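A sketch of the override pattern described above for getSymbolAddressInLogicalDylib (SectionMemoryManager is used as the base purely for illustration):

// Sketch only: let "external" symbol resolution search the logical dylib
// before falling back to the usual process-wide lookup.
class LogicalDylibAwareMemMgr : public SectionMemoryManager {
public:
  uint64_t getSymbolAddress(const std::string &Name) override {
    if (uint64_t Addr = getSymbolAddressInLogicalDylib(Name))
      return Addr;
    return SectionMemoryManager::getSymbolAddress(Name);
  }
};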

View File

@ -10,6 +10,7 @@ add_llvm_library(LLVMExecutionEngine
add_subdirectory(Interpreter)
add_subdirectory(MCJIT)
add_subdirectory(Orc)
add_subdirectory(RuntimeDyld)
if( LLVM_USE_OPROFILE )

View File

@ -46,6 +46,11 @@ ExecutionEngine *(*ExecutionEngine::MCJITCtor)(
std::unique_ptr<Module> M, std::string *ErrorStr,
std::unique_ptr<RTDyldMemoryManager> MCJMM,
std::unique_ptr<TargetMachine> TM) = nullptr;
ExecutionEngine *(*ExecutionEngine::OrcMCJITReplacementCtor)(
std::string *ErrorStr, std::unique_ptr<RTDyldMemoryManager> OrcJMM,
std::unique_ptr<TargetMachine> TM) = nullptr;
ExecutionEngine *(*ExecutionEngine::InterpCtor)(std::unique_ptr<Module> M,
std::string *ErrorStr) =nullptr;
@ -393,6 +398,10 @@ int ExecutionEngine::runFunctionAsMain(Function *Fn,
return runFunction(Fn, GVArgs).IntVal.getZExtValue();
}
EngineBuilder::EngineBuilder() {
InitEngine();
}
EngineBuilder::EngineBuilder(std::unique_ptr<Module> M)
: M(std::move(M)), MCJMM(nullptr) {
InitEngine();
@ -414,6 +423,7 @@ void EngineBuilder::InitEngine() {
Options = TargetOptions();
RelocModel = Reloc::Default;
CMModel = CodeModel::JITDefault;
UseOrcMCJITReplacement = false;
// IR module verification is enabled by default in debug builds, and disabled
// by default in release builds.
@ -456,9 +466,14 @@ ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
}
ExecutionEngine *EE = nullptr;
if (ExecutionEngine::MCJITCtor)
if (ExecutionEngine::OrcMCJITReplacementCtor && UseOrcMCJITReplacement) {
EE = ExecutionEngine::OrcMCJITReplacementCtor(ErrorStr, std::move(MCJMM),
std::move(TheTM));
EE->addModule(std::move(M));
} else if (ExecutionEngine::MCJITCtor)
EE = ExecutionEngine::MCJITCtor(std::move(M), ErrorStr, std::move(MCJMM),
std::move(TheTM));
if (EE) {
EE->setVerifyModules(VerifyModules);
return EE;

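For context, a hedged usage sketch of the new code path in EngineBuilder::create above. TheModule is an assumed, already-populated std::unique_ptr&lt;Module&gt;; the surrounding setup is not part of this patch.

// Hypothetical usage sketch only.
EngineBuilder EB(std::move(TheModule));
std::string Err;
EB.setErrorStr(&Err);
EB.setEngineKind(EngineKind::JIT);
// Opt in to the Orc-based engine; MCJIT is still used when this is left false.
EB.setUseOrcMCJITReplacement(true);
ExecutionEngine *EE = EB.create(); // Err holds a message if this returns null.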
View File

@ -16,7 +16,7 @@
;===------------------------------------------------------------------------===;
[common]
subdirectories = Interpreter MCJIT RuntimeDyld IntelJITEvents OProfileJIT
subdirectories = Interpreter MCJIT RuntimeDyld IntelJITEvents OProfileJIT Orc
[component_0]
type = Library

View File

@ -10,12 +10,12 @@
#ifndef LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
#define LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
#include "ObjectBuffer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/ObjectCache.h"
#include "llvm/ExecutionEngine/ObjectMemoryBuffer.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/IR/Module.h"

View File

@ -11,7 +11,7 @@ LIBRARYNAME = LLVMExecutionEngine
include $(LEVEL)/Makefile.config
PARALLEL_DIRS = Interpreter MCJIT RuntimeDyld
PARALLEL_DIRS = Interpreter MCJIT Orc RuntimeDyld
ifeq ($(USE_INTEL_JITEVENTS), 1)
PARALLEL_DIRS += IntelJITEvents

View File

@ -0,0 +1,6 @@
add_llvm_library(LLVMOrcJIT
CloneSubModule.cpp
IndirectionUtils.cpp
OrcMCJITReplacement.cpp
OrcTargetSupport.cpp
)

View File

@ -0,0 +1,112 @@
#include "llvm/ExecutionEngine/Orc/CloneSubModule.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Cloning.h"
using namespace llvm;
void llvm::copyGVInitializer(GlobalVariable &New, const GlobalVariable &Orig,
ValueToValueMapTy &VMap) {
if (Orig.hasInitializer())
New.setInitializer(MapValue(Orig.getInitializer(), VMap));
}
void llvm::copyFunctionBody(Function &New, const Function &Orig,
ValueToValueMapTy &VMap) {
if (!Orig.isDeclaration()) {
Function::arg_iterator DestI = New.arg_begin();
for (Function::const_arg_iterator J = Orig.arg_begin(); J != Orig.arg_end();
++J) {
DestI->setName(J->getName());
VMap[J] = DestI++;
}
SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
CloneFunctionInto(&New, &Orig, VMap, /*ModuleLevelChanges=*/true, Returns);
}
}
std::unique_ptr<Module>
llvm::CloneSubModule(const Module &M,
HandleGlobalVariableFtor HandleGlobalVariable,
HandleFunctionFtor HandleFunction, bool KeepInlineAsm) {
ValueToValueMapTy VMap;
// First off, we need to create the new module.
std::unique_ptr<Module> New =
llvm::make_unique<Module>(M.getModuleIdentifier(), M.getContext());
New->setDataLayout(M.getDataLayout());
New->setTargetTriple(M.getTargetTriple());
if (KeepInlineAsm)
New->setModuleInlineAsm(M.getModuleInlineAsm());
// Copy global variables (but not initializers, yet).
for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I) {
GlobalVariable *GV = new GlobalVariable(
*New, I->getType()->getElementType(), I->isConstant(), I->getLinkage(),
(Constant *)nullptr, I->getName(), (GlobalVariable *)nullptr,
I->getThreadLocalMode(), I->getType()->getAddressSpace());
GV->copyAttributesFrom(I);
VMap[I] = GV;
}
// Loop over the functions in the module, creating declarations for them;
// bodies (if any) are copied later via the function handler.
for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I) {
Function *NF =
Function::Create(cast<FunctionType>(I->getType()->getElementType()),
I->getLinkage(), I->getName(), &*New);
NF->copyAttributesFrom(I);
VMap[I] = NF;
}
// Loop over the aliases in the module
for (Module::const_alias_iterator I = M.alias_begin(), E = M.alias_end();
I != E; ++I) {
auto *PTy = cast<PointerType>(I->getType());
auto *GA =
GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
I->getLinkage(), I->getName(), &*New);
GA->copyAttributesFrom(I);
VMap[I] = GA;
}
// Now that all of the things that a global variable initializer can refer to
// have been created, loop through and let the supplied handler copy the
// global variable initializers over.
for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I) {
GlobalVariable &GV = *cast<GlobalVariable>(VMap[I]);
HandleGlobalVariable(GV, *I, VMap);
}
// Similarly, copy over function bodies now...
for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I) {
Function &F = *cast<Function>(VMap[I]);
HandleFunction(F, *I, VMap);
}
// And aliases
for (Module::const_alias_iterator I = M.alias_begin(), E = M.alias_end();
I != E; ++I) {
GlobalAlias *GA = cast<GlobalAlias>(VMap[I]);
if (const Constant *C = I->getAliasee())
GA->setAliasee(MapValue(C, VMap));
}
// And named metadata....
for (Module::const_named_metadata_iterator I = M.named_metadata_begin(),
E = M.named_metadata_end();
I != E; ++I) {
const NamedMDNode &NMD = *I;
NamedMDNode *NewNMD = New->getOrInsertNamedMetadata(NMD.getName());
for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i)
NewNMD->addOperand(MapMetadata(NMD.getOperand(i), VMap));
}
return New;
}

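A hedged usage sketch of CloneSubModule above: passing the two copy helpers as the handlers yields a straight clone of the whole module. M is an assumed existing module; nothing here is part of the patch itself.

// Hypothetical sketch: a full clone of M, using the helper functions defined
// above as the global-variable and function handlers.
std::unique_ptr<Module> FullClone =
    CloneSubModule(M, copyGVInitializer, copyFunctionBody,
                   /*KeepInlineAsm=*/true);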
View File

@ -0,0 +1,157 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
#include "llvm/ExecutionEngine/Orc/CloneSubModule.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/IRBuilder.h"
#include <set>
using namespace llvm;
namespace llvm {
JITIndirections makeCallsSingleIndirect(
Module &M, const std::function<bool(const Function &)> &ShouldIndirect,
const char *JITImplSuffix, const char *JITAddrSuffix) {
std::vector<Function *> Worklist;
std::vector<std::string> FuncNames;
for (auto &F : M)
if (ShouldIndirect(F) && (F.user_begin() != F.user_end())) {
Worklist.push_back(&F);
FuncNames.push_back(F.getName());
}
for (auto *F : Worklist) {
GlobalVariable *FImplAddr = new GlobalVariable(
M, F->getType(), false, GlobalValue::ExternalLinkage,
Constant::getNullValue(F->getType()), F->getName() + JITAddrSuffix,
nullptr, GlobalValue::NotThreadLocal, 0, true);
FImplAddr->setVisibility(GlobalValue::HiddenVisibility);
for (auto *U : F->users()) {
assert(isa<Instruction>(U) && "Cannot indirect non-instruction use");
IRBuilder<> Builder(cast<Instruction>(U));
U->replaceUsesOfWith(F, Builder.CreateLoad(FImplAddr));
}
}
return JITIndirections(
FuncNames, [=](StringRef S) -> std::string { return std::string(S); },
[=](StringRef S)
-> std::string { return std::string(S) + JITAddrSuffix; });
}
JITIndirections makeCallsDoubleIndirect(
Module &M, const std::function<bool(const Function &)> &ShouldIndirect,
const char *JITImplSuffix, const char *JITAddrSuffix) {
std::vector<Function *> Worklist;
std::vector<std::string> FuncNames;
for (auto &F : M)
if (!F.isDeclaration() && !F.hasAvailableExternallyLinkage() &&
ShouldIndirect(F))
Worklist.push_back(&F);
for (auto *F : Worklist) {
std::string OrigName = F->getName();
F->setName(OrigName + JITImplSuffix);
FuncNames.push_back(OrigName);
GlobalVariable *FImplAddr = new GlobalVariable(
M, F->getType(), false, GlobalValue::ExternalLinkage,
Constant::getNullValue(F->getType()), OrigName + JITAddrSuffix, nullptr,
GlobalValue::NotThreadLocal, 0, true);
FImplAddr->setVisibility(GlobalValue::HiddenVisibility);
Function *FRedirect =
Function::Create(F->getFunctionType(), F->getLinkage(), OrigName, &M);
F->replaceAllUsesWith(FRedirect);
BasicBlock *EntryBlock =
BasicBlock::Create(M.getContext(), "entry", FRedirect);
IRBuilder<> Builder(EntryBlock);
LoadInst *FImplLoadedAddr = Builder.CreateLoad(FImplAddr);
std::vector<Value *> CallArgs;
for (Value &Arg : FRedirect->args())
CallArgs.push_back(&Arg);
CallInst *Call = Builder.CreateCall(FImplLoadedAddr, CallArgs);
Call->setTailCall();
Builder.CreateRet(Call);
}
return JITIndirections(
FuncNames, [=](StringRef S)
-> std::string { return std::string(S) + JITImplSuffix; },
[=](StringRef S)
-> std::string { return std::string(S) + JITAddrSuffix; });
}
std::vector<std::unique_ptr<Module>>
explode(const Module &OrigMod,
const std::function<bool(const Function &)> &ShouldExtract) {
std::vector<std::unique_ptr<Module>> NewModules;
// Split all the globals, non-indirected functions, etc. into a single module.
auto ExtractGlobalVars = [&](GlobalVariable &New, const GlobalVariable &Orig,
ValueToValueMapTy &VMap) {
copyGVInitializer(New, Orig, VMap);
if (New.getLinkage() == GlobalValue::PrivateLinkage) {
New.setLinkage(GlobalValue::ExternalLinkage);
New.setVisibility(GlobalValue::HiddenVisibility);
}
};
auto ExtractNonImplFunctions =
[&](Function &New, const Function &Orig, ValueToValueMapTy &VMap) {
if (!ShouldExtract(New))
copyFunctionBody(New, Orig, VMap);
};
NewModules.push_back(CloneSubModule(OrigMod, ExtractGlobalVars,
ExtractNonImplFunctions, true));
// Preserve initializers for Common linkage vars, and make private linkage
// globals external: they are now provided by the globals module extracted
// above.
auto DropGlobalVars = [&](GlobalVariable &New, const GlobalVariable &Orig,
ValueToValueMapTy &VMap) {
if (New.getLinkage() == GlobalValue::CommonLinkage)
copyGVInitializer(New, Orig, VMap);
else if (New.getLinkage() == GlobalValue::PrivateLinkage)
New.setLinkage(GlobalValue::ExternalLinkage);
};
// Split each 'impl' function out in to its own module.
for (const auto &Func : OrigMod) {
if (Func.isDeclaration() || !ShouldExtract(Func))
continue;
auto ExtractNamedFunction =
[&](Function &New, const Function &Orig, ValueToValueMapTy &VMap) {
if (New.getName() == Func.getName())
copyFunctionBody(New, Orig, VMap);
};
NewModules.push_back(
CloneSubModule(OrigMod, DropGlobalVars, ExtractNamedFunction, false));
}
return NewModules;
}
std::vector<std::unique_ptr<Module>>
explode(const Module &OrigMod, const JITIndirections &Indirections) {
std::set<std::string> ImplNames;
for (const auto &FuncName : Indirections.IndirectedNames)
ImplNames.insert(Indirections.GetImplName(FuncName));
return explode(
OrigMod, [&](const Function &F) { return ImplNames.count(F.getName()); });
}
}

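A hedged sketch of how these utilities might be combined for lazy compilation: double-indirect every defined function, then explode the module so each implementation lives in its own sub-module. M is an assumed module and the suffix strings are illustrative only.

// Hypothetical sketch, not part of this patch: indirect all functions, then
// split M into one sub-module per indirected implementation.
JITIndirections Indirections = makeCallsDoubleIndirect(
    M, [](const Function &) { return true; }, "$orc_impl", "$orc_addr");
std::vector<std::unique_ptr<Module>> Parts = explode(M, Indirections);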
View File

@ -0,0 +1,22 @@
;===- ./lib/ExecutionEngine/Orc/LLVMBuild.txt ------------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[component_0]
type = Library
name = OrcJIT
parent = ExecutionEngine
required_libraries = Core ExecutionEngine Object RuntimeDyld Support Target

View File

@ -0,0 +1,13 @@
##===- lib/ExecutionEngine/Orc/Makefile --------------------*- Makefile -*-===##
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
##===----------------------------------------------------------------------===##
LEVEL = ../../..
LIBRARYNAME = LLVMOrcJIT
include $(LEVEL)/Makefile.common

View File

@ -0,0 +1,124 @@
//===-------- OrcMCJITReplacement.cpp - Orc-based MCJIT replacement -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "OrcMCJITReplacement.h"
#include "llvm/ExecutionEngine/GenericValue.h"
namespace {
static struct RegisterJIT {
RegisterJIT() { llvm::OrcMCJITReplacement::Register(); }
} JITRegistrator;
extern "C" void LLVMLinkInOrcMCJITReplacement() {}
}
namespace llvm {
GenericValue
OrcMCJITReplacement::runFunction(Function *F,
const std::vector<GenericValue> &ArgValues) {
assert(F && "Function *F was null at entry to run()");
void *FPtr = getPointerToFunction(F);
assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
FunctionType *FTy = F->getFunctionType();
Type *RetTy = FTy->getReturnType();
assert((FTy->getNumParams() == ArgValues.size() ||
(FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
"Wrong number of arguments passed into function!");
assert(FTy->getNumParams() == ArgValues.size() &&
"This doesn't support passing arguments through varargs (yet)!");
// Handle some common cases first. These cases correspond to common `main'
// prototypes.
if (RetTy->isIntegerTy(32) || RetTy->isVoidTy()) {
switch (ArgValues.size()) {
case 3:
if (FTy->getParamType(0)->isIntegerTy(32) &&
FTy->getParamType(1)->isPointerTy() &&
FTy->getParamType(2)->isPointerTy()) {
int (*PF)(int, char **, const char **) =
(int (*)(int, char **, const char **))(intptr_t)FPtr;
// Call the function.
GenericValue rv;
rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
(char **)GVTOP(ArgValues[1]),
(const char **)GVTOP(ArgValues[2])));
return rv;
}
break;
case 2:
if (FTy->getParamType(0)->isIntegerTy(32) &&
FTy->getParamType(1)->isPointerTy()) {
int (*PF)(int, char **) = (int (*)(int, char **))(intptr_t)FPtr;
// Call the function.
GenericValue rv;
rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
(char **)GVTOP(ArgValues[1])));
return rv;
}
break;
case 1:
if (FTy->getNumParams() == 1 && FTy->getParamType(0)->isIntegerTy(32)) {
GenericValue rv;
int (*PF)(int) = (int (*)(int))(intptr_t)FPtr;
rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
return rv;
}
break;
}
}
// Next, handle cases where no arguments are passed.
if (ArgValues.empty()) {
GenericValue rv;
switch (RetTy->getTypeID()) {
default:
llvm_unreachable("Unknown return type for function call!");
case Type::IntegerTyID: {
unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
if (BitWidth == 1)
rv.IntVal = APInt(BitWidth, ((bool (*)())(intptr_t)FPtr)());
else if (BitWidth <= 8)
rv.IntVal = APInt(BitWidth, ((char (*)())(intptr_t)FPtr)());
else if (BitWidth <= 16)
rv.IntVal = APInt(BitWidth, ((short (*)())(intptr_t)FPtr)());
else if (BitWidth <= 32)
rv.IntVal = APInt(BitWidth, ((int (*)())(intptr_t)FPtr)());
else if (BitWidth <= 64)
rv.IntVal = APInt(BitWidth, ((int64_t (*)())(intptr_t)FPtr)());
else
llvm_unreachable("Integer types > 64 bits not supported");
return rv;
}
case Type::VoidTyID:
rv.IntVal = APInt(32, ((int (*)())(intptr_t)FPtr)());
return rv;
case Type::FloatTyID:
rv.FloatVal = ((float (*)())(intptr_t)FPtr)();
return rv;
case Type::DoubleTyID:
rv.DoubleVal = ((double (*)())(intptr_t)FPtr)();
return rv;
case Type::X86_FP80TyID:
case Type::FP128TyID:
case Type::PPC_FP128TyID:
llvm_unreachable("long double not supported yet");
case Type::PointerTyID:
return PTOGV(((void *(*)())(intptr_t)FPtr)());
}
}
llvm_unreachable("Full-featured argument passing not supported yet!");
}
}

View File

@ -0,0 +1,328 @@
//===---- OrcMCJITReplacement.h - Orc based MCJIT replacement ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Orc based MCJIT replacement.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_ORC_ORCMCJITREPLACEMENT_H
#define LLVM_LIB_EXECUTIONENGINE_ORC_ORCMCJITREPLACEMENT_H
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/LazyEmittingLayer.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/Object/Archive.h"
#include "llvm/Target/TargetSubtargetInfo.h"
namespace llvm {
class OrcMCJITReplacement : public ExecutionEngine {
class ForwardingRTDyldMM : public RTDyldMemoryManager {
public:
ForwardingRTDyldMM(OrcMCJITReplacement &M) : M(M) {}
uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID,
StringRef SectionName) override {
uint8_t *Addr =
M.MM->allocateCodeSection(Size, Alignment, SectionID, SectionName);
M.SectionsAllocatedSinceLastLoad.insert(Addr);
return Addr;
}
uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID, StringRef SectionName,
bool IsReadOnly) override {
uint8_t *Addr = M.MM->allocateDataSection(Size, Alignment, SectionID,
SectionName, IsReadOnly);
M.SectionsAllocatedSinceLastLoad.insert(Addr);
return Addr;
}
void reserveAllocationSpace(uintptr_t CodeSize, uintptr_t DataSizeRO,
uintptr_t DataSizeRW) override {
return M.MM->reserveAllocationSpace(CodeSize, DataSizeRO, DataSizeRW);
}
bool needsToReserveAllocationSpace() override {
return M.MM->needsToReserveAllocationSpace();
}
void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
size_t Size) override {
return M.MM->registerEHFrames(Addr, LoadAddr, Size);
}
void deregisterEHFrames(uint8_t *Addr, uint64_t LoadAddr,
size_t Size) override {
return M.MM->deregisterEHFrames(Addr, LoadAddr, Size);
}
uint64_t getSymbolAddress(const std::string &Name) override {
return M.getSymbolAddressWithoutMangling(Name);
}
void *getPointerToNamedFunction(const std::string &Name,
bool AbortOnFailure = true) override {
return M.MM->getPointerToNamedFunction(Name, AbortOnFailure);
}
void notifyObjectLoaded(ExecutionEngine *EE,
const object::ObjectFile &O) override {
return M.MM->notifyObjectLoaded(EE, O);
}
bool finalizeMemory(std::string *ErrMsg = nullptr) override {
// Each set of objects loaded will be finalized exactly once, but since
// symbol lookup during relocation may recursively trigger the
// loading/relocation of other modules, and since we're forwarding all
// finalizeMemory calls to a single underlying memory manager, we need to
// defer forwarding the call on until all necessary objects have been
// loaded. Otherwise, during the relocation of a leaf object, we will end
// up finalizing memory, causing a crash further up the stack when we
// attempt to apply relocations to finalized memory.
// To avoid finalizing too early, look at how many objects have been
// loaded but not yet finalized. This is a bit of a hack that relies on
// the fact that we're lazily emitting object files: The only way you can
// get more than one set of objects loaded but not yet finalized is if
// they were loaded during relocation of another set.
if (M.UnfinalizedSections.size() == 1)
return M.MM->finalizeMemory(ErrMsg);
return false;
}
private:
OrcMCJITReplacement &M;
};
private:
static ExecutionEngine *
createOrcMCJITReplacement(std::string *ErrorMsg,
std::unique_ptr<RTDyldMemoryManager> OrcJMM,
std::unique_ptr<llvm::TargetMachine> TM) {
return new llvm::OrcMCJITReplacement(std::move(OrcJMM), std::move(TM));
}
public:
static void Register() {
OrcMCJITReplacementCtor = createOrcMCJITReplacement;
}
OrcMCJITReplacement(std::unique_ptr<RTDyldMemoryManager> MM,
std::unique_ptr<TargetMachine> TM)
: TM(std::move(TM)), MM(std::move(MM)),
Mang(this->TM->getSubtargetImpl()->getDataLayout()),
NotifyObjectLoaded(*this), NotifyFinalized(*this),
ObjectLayer(NotifyObjectLoaded, NotifyFinalized),
CompileLayer(ObjectLayer, SimpleCompiler(*this->TM)),
LazyEmitLayer(CompileLayer) {
setDataLayout(this->TM->getSubtargetImpl()->getDataLayout());
}
void addModule(std::unique_ptr<Module> M) {
// If this module doesn't have a DataLayout attached then attach the
// default.
if (!M->getDataLayout())
M->setDataLayout(getDataLayout());
OwnedModules.push_back(std::move(M));
std::vector<Module *> Ms;
Ms.push_back(&*OwnedModules.back());
LazyEmitLayer.addModuleSet(std::move(Ms),
llvm::make_unique<ForwardingRTDyldMM>(*this));
}
void addObjectFile(std::unique_ptr<object::ObjectFile> O) override {
std::vector<std::unique_ptr<object::ObjectFile>> Objs;
Objs.push_back(std::move(O));
ObjectLayer.addObjectSet(std::move(Objs),
llvm::make_unique<ForwardingRTDyldMM>(*this));
}
void addObjectFile(object::OwningBinary<object::ObjectFile> O) override {
std::unique_ptr<object::ObjectFile> Obj;
std::unique_ptr<MemoryBuffer> Buf;
std::tie(Obj, Buf) = O.takeBinary();
std::vector<std::unique_ptr<object::ObjectFile>> Objs;
Objs.push_back(std::move(Obj));
ObjectLayer.addObjectSet(std::move(Objs),
llvm::make_unique<ForwardingRTDyldMM>(*this));
}
void addArchive(object::OwningBinary<object::Archive> A) override {
Archives.push_back(std::move(A));
}
uint64_t getSymbolAddress(StringRef Name) {
return getSymbolAddressWithoutMangling(Mangle(Name));
}
void finalizeObject() override {
// This is deprecated - Aim to remove in ExecutionEngine.
// REMOVE IF POSSIBLE - Doesn't make sense for New JIT.
}
void mapSectionAddress(const void *LocalAddress,
uint64_t TargetAddress) override {
for (auto &P : UnfinalizedSections)
if (P.second.count(LocalAddress))
ObjectLayer.mapSectionAddress(P.first, LocalAddress, TargetAddress);
}
uint64_t getGlobalValueAddress(const std::string &Name) override {
return getSymbolAddress(Name);
}
uint64_t getFunctionAddress(const std::string &Name) override {
return getSymbolAddress(Name);
}
void *getPointerToFunction(Function *F) override {
uint64_t FAddr = getSymbolAddress(F->getName());
return reinterpret_cast<void *>(static_cast<uintptr_t>(FAddr));
}
void *getPointerToNamedFunction(StringRef Name,
bool AbortOnFailure = true) override {
uint64_t Addr = getSymbolAddress(Name);
if (!Addr && AbortOnFailure)
llvm_unreachable("Missing symbol!");
return reinterpret_cast<void *>(static_cast<uintptr_t>(Addr));
}
GenericValue runFunction(Function *F,
const std::vector<GenericValue> &ArgValues) override;
void setObjectCache(ObjectCache *NewCache) override {
CompileLayer.setObjectCache(NewCache);
}
private:
uint64_t getSymbolAddressWithoutMangling(StringRef Name) {
if (uint64_t Addr = LazyEmitLayer.getSymbolAddress(Name, false))
return Addr;
if (uint64_t Addr = MM->getSymbolAddress(Name))
return Addr;
if (uint64_t Addr = scanArchives(Name))
return Addr;
return 0;
}
uint64_t scanArchives(StringRef Name) {
for (object::OwningBinary<object::Archive> &OB : Archives) {
object::Archive *A = OB.getBinary();
// Look for our symbols in each Archive
object::Archive::child_iterator ChildIt = A->findSym(Name);
if (ChildIt != A->child_end()) {
// FIXME: Support nested archives?
ErrorOr<std::unique_ptr<object::Binary>> ChildBinOrErr =
ChildIt->getAsBinary();
if (ChildBinOrErr.getError())
continue;
std::unique_ptr<object::Binary> &ChildBin = ChildBinOrErr.get();
if (ChildBin->isObject()) {
std::vector<std::unique_ptr<object::ObjectFile>> ObjSet;
ObjSet.push_back(std::unique_ptr<object::ObjectFile>(
static_cast<object::ObjectFile *>(ChildBin.release())));
ObjectLayer.addObjectSet(
std::move(ObjSet), llvm::make_unique<ForwardingRTDyldMM>(*this));
if (uint64_t Addr = ObjectLayer.getSymbolAddress(Name, true))
return Addr;
}
}
}
return 0;
}
class NotifyObjectLoadedT {
public:
typedef std::vector<std::unique_ptr<object::ObjectFile>> ObjListT;
typedef std::vector<std::unique_ptr<RuntimeDyld::LoadedObjectInfo>>
LoadedObjInfoListT;
NotifyObjectLoadedT(OrcMCJITReplacement &M) : M(M) {}
void operator()(ObjectLinkingLayerBase::ObjSetHandleT H,
const ObjListT &Objects,
const LoadedObjInfoListT &Infos) const {
M.UnfinalizedSections[H] = std::move(M.SectionsAllocatedSinceLastLoad);
M.SectionsAllocatedSinceLastLoad = {};
assert(Objects.size() == Infos.size() &&
"Incorrect number of Infos for Objects.");
for (unsigned I = 0; I < Objects.size(); ++I)
M.MM->notifyObjectLoaded(&M, *Objects[I]);
}
private:
OrcMCJITReplacement &M;
};
class NotifyFinalizedT {
public:
NotifyFinalizedT(OrcMCJITReplacement &M) : M(M) {}
void operator()(ObjectLinkingLayerBase::ObjSetHandleT H) {
M.UnfinalizedSections.erase(H);
}
private:
OrcMCJITReplacement &M;
};
std::string Mangle(StringRef Name) {
std::string MangledName;
{
raw_string_ostream MangledNameStream(MangledName);
Mang.getNameWithPrefix(MangledNameStream, Name);
}
return MangledName;
}
typedef ObjectLinkingLayer<NotifyObjectLoadedT> ObjectLayerT;
typedef IRCompileLayer<ObjectLayerT> CompileLayerT;
typedef LazyEmittingLayer<CompileLayerT> LazyEmitLayerT;
std::unique_ptr<TargetMachine> TM;
std::unique_ptr<RTDyldMemoryManager> MM;
Mangler Mang;
NotifyObjectLoadedT NotifyObjectLoaded;
NotifyFinalizedT NotifyFinalized;
ObjectLayerT ObjectLayer;
CompileLayerT CompileLayer;
LazyEmitLayerT LazyEmitLayer;
// MCJIT keeps modules alive - we need to do the same for backwards
// compatibility.
std::vector<std::unique_ptr<Module>> OwnedModules;
// We need to store an ObjectLayerT::ObjSetHandleT for each of the object sets
// that have been emitted but not yet finalized, so that we can forward the
// mapSectionAddress calls appropriately.
typedef std::set<const void *> SectionAddrSet;
struct ObjSetHandleCompare {
bool operator()(ObjectLayerT::ObjSetHandleT H1,
ObjectLayerT::ObjSetHandleT H2) const {
return &*H1 < &*H2;
}
};
SectionAddrSet SectionsAllocatedSinceLastLoad;
std::map<ObjectLayerT::ObjSetHandleT, SectionAddrSet, ObjSetHandleCompare>
UnfinalizedSections;
std::vector<object::OwningBinary<object::Archive>> Archives;
};
}
#endif // LLVM_LIB_EXECUTIONENGINE_ORC_ORCMCJITREPLACEMENT_H

View File

@ -0,0 +1,102 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
#include <array>
#include <sstream>
using namespace llvm;
namespace {
const char *JITCallbackFuncName = "call_jit_for_lazy_compile";
const char *JITCallbackIndexLabelPrefix = "jit_resolve_";
std::array<const char *, 12> X86GPRsToSave = {{
"rbp", "rbx", "r12", "r13", "r14", "r15", // Callee saved.
"rdi", "rsi", "rdx", "rcx", "r8", "r9", // Int args.
}};
std::array<const char *, 8> X86XMMsToSave = {{
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" // FP args
}};
template <typename OStream> unsigned saveX86Regs(OStream &OS) {
for (const auto &GPR : X86GPRsToSave)
OS << " pushq %" << GPR << "\n";
OS << " subq $" << (16 * X86XMMsToSave.size()) << ", %rsp\n";
for (unsigned i = 0; i < X86XMMsToSave.size(); ++i)
OS << " movdqu %" << X86XMMsToSave[i] << ", "
<< (16 * (X86XMMsToSave.size() - i - 1)) << "(%rsp)\n";
return (8 * X86GPRsToSave.size()) + (16 * X86XMMsToSave.size());
}
template <typename OStream> void restoreX86Regs(OStream &OS) {
for (unsigned i = 0; i < X86XMMsToSave.size(); ++i)
OS << " movdqu " << (16 * i) << "(%rsp), %"
<< X86XMMsToSave[(X86XMMsToSave.size() - i - 1)] << "\n";
OS << " addq $" << (16 * X86XMMsToSave.size()) << ", %rsp\n";
for (unsigned i = 0; i < X86GPRsToSave.size(); ++i)
OS << " popq %" << X86GPRsToSave[X86GPRsToSave.size() - i - 1] << "\n";
}
uint64_t call_jit_for_fn(JITResolveCallbackHandler *J, uint64_t FuncIdx) {
return J->resolve(FuncIdx);
}
}
namespace llvm {
std::string getJITResolveCallbackIndexLabel(unsigned I) {
std::ostringstream LabelStream;
LabelStream << JITCallbackIndexLabelPrefix << I;
return LabelStream.str();
}
void insertX86CallbackAsm(Module &M, JITResolveCallbackHandler &J) {
uint64_t CallbackAddr =
static_cast<uint64_t>(reinterpret_cast<uintptr_t>(call_jit_for_fn));
std::ostringstream JITCallbackAsm;
Triple TT(M.getTargetTriple());
if (TT.getOS() == Triple::Darwin)
JITCallbackAsm << ".section __TEXT,__text,regular,pure_instructions\n"
<< ".align 4, 0x90\n";
else
JITCallbackAsm << ".text\n"
<< ".align 16, 0x90\n";
JITCallbackAsm << "jit_object_addr:\n"
<< " .quad " << &J << "\n" << JITCallbackFuncName << ":\n";
uint64_t ReturnAddrOffset = saveX86Regs(JITCallbackAsm);
// Compute index, load object address, and call JIT.
JITCallbackAsm << " movq " << ReturnAddrOffset << "(%rsp), %rax\n"
<< " leaq (jit_indices_start+5)(%rip), %rbx\n"
<< " subq %rbx, %rax\n"
<< " xorq %rdx, %rdx\n"
<< " movq $5, %rbx\n"
<< " divq %rbx\n"
<< " movq %rax, %rsi\n"
<< " leaq jit_object_addr(%rip), %rdi\n"
<< " movq (%rdi), %rdi\n"
<< " movabsq $" << CallbackAddr << ", %rax\n"
<< " callq *%rax\n"
<< " movq %rax, " << ReturnAddrOffset << "(%rsp)\n";
restoreX86Regs(JITCallbackAsm);
JITCallbackAsm << " retq\n"
<< "jit_indices_start:\n";
for (JITResolveCallbackHandler::StubIndex I = 0; I < J.getNumFuncs(); ++I)
JITCallbackAsm << getJITResolveCallbackIndexLabel(I) << ":\n"
<< " callq " << JITCallbackFuncName << "\n";
M.appendModuleInlineAsm(JITCallbackAsm.str());
}
}

View File

@ -449,11 +449,9 @@ void RuntimeDyldImpl::emitCommonSymbols(const ObjectFile &Obj,
StringRef Name;
Check(Sym.getName(Name));
assert((GlobalSymbolTable.find(Name) == GlobalSymbolTable.end()) &&
"Common symbol in global symbol table.");
// Skip common symbols that are already defined elsewhere.
if (GlobalSymbolTable.count(Name)) {
if (GlobalSymbolTable.count(Name) ||
MemMgr->getSymbolAddressInLogicalDylib(Name)) {
DEBUG(dbgs() << "\tSkipping already emitted common symbol '" << Name
<< "'\n");
continue;

View File

@ -10,6 +10,7 @@ set(LLVM_LINK_COMPONENTS
MC
MCJIT
Object
OrcJIT
SelectionDAG
Support
native

View File

@ -25,6 +25,7 @@
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/ExecutionEngine/ObjectCache.h"
#include "llvm/ExecutionEngine/OrcMCJITReplacement.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
@ -74,6 +75,13 @@ namespace {
cl::desc("Force interpretation: disable JIT"),
cl::init(false));
cl::opt<bool> UseOrcMCJITReplacement("use-orcmcjit",
cl::desc("Use the experimental "
"OrcMCJITReplacement as a "
"drop-in replacement for "
"MCJIT."),
cl::init(false));
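// Example (hypothetical input file): `lli -use-orcmcjit foo.ll` executes
// foo.ll on the OrcMCJITReplacement engine instead of MCJIT.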
// The MCJIT supports building for a target address space separate from
// the JIT compilation process. Use a forked process and a copying
// memory manager with IPC to execute using this functionality.
@ -421,6 +429,7 @@ int main(int argc, char **argv, char * const *envp) {
builder.setEngineKind(ForceInterpreter
? EngineKind::Interpreter
: EngineKind::JIT);
builder.setUseOrcMCJITReplacement(UseOrcMCJITReplacement);
// If we are supposed to override the target triple, do so now.
if (!TargetTriple.empty())