2004-11-22 22:00:25 +00:00
|
|
|
//===-- JITEmitter.cpp - Write machine code to executable memory ----------===//
|
2005-04-21 22:55:34 +00:00
|
|
|
//
|
2003-10-20 19:43:21 +00:00
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-29 20:36:04 +00:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2005-04-21 22:55:34 +00:00
|
|
|
//
|
2003-10-20 19:43:21 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2002-12-24 00:01:05 +00:00
|
|
|
//
|
2004-11-20 03:46:14 +00:00
|
|
|
// This file defines a MachineCodeEmitter object that is used by the JIT to
|
|
|
|
// write machine code to memory and remember where relocatable values are.
|
2002-12-24 00:01:05 +00:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2003-08-05 17:00:32 +00:00
|
|
|
#define DEBUG_TYPE "jit"
|
2003-12-20 01:46:27 +00:00
|
|
|
#include "JIT.h"
|
2008-02-13 18:39:37 +00:00
|
|
|
#include "JITDwarfEmitter.h"
|
2003-11-30 04:23:21 +00:00
|
|
|
#include "llvm/Constant.h"
|
|
|
|
#include "llvm/Module.h"
|
2005-03-17 15:38:16 +00:00
|
|
|
#include "llvm/Type.h"
|
2002-12-24 00:01:05 +00:00
|
|
|
#include "llvm/CodeGen/MachineCodeEmitter.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2003-01-13 01:00:12 +00:00
|
|
|
#include "llvm/CodeGen/MachineConstantPool.h"
|
2006-04-22 18:53:45 +00:00
|
|
|
#include "llvm/CodeGen/MachineJumpTableInfo.h"
|
2008-02-13 18:39:37 +00:00
|
|
|
#include "llvm/CodeGen/MachineModuleInfo.h"
|
2004-11-20 03:46:14 +00:00
|
|
|
#include "llvm/CodeGen/MachineRelocation.h"
|
2007-12-05 23:39:57 +00:00
|
|
|
#include "llvm/ExecutionEngine/JITMemoryManager.h"
|
2003-01-13 01:00:12 +00:00
|
|
|
#include "llvm/Target/TargetData.h"
|
2004-11-20 03:46:14 +00:00
|
|
|
#include "llvm/Target/TargetJITInfo.h"
|
2006-12-14 19:17:33 +00:00
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2008-02-13 18:39:37 +00:00
|
|
|
#include "llvm/Target/TargetOptions.h"
|
2004-09-01 22:55:40 +00:00
|
|
|
#include "llvm/Support/Debug.h"
|
2006-05-08 22:00:52 +00:00
|
|
|
#include "llvm/Support/MutexGuard.h"
|
2007-01-23 10:26:08 +00:00
|
|
|
#include "llvm/System/Disassembler.h"
|
2008-04-18 20:59:31 +00:00
|
|
|
#include "llvm/Target/TargetInstrInfo.h"
|
2004-09-01 22:55:40 +00:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2005-07-29 23:40:16 +00:00
|
|
|
#include <algorithm>
|
2003-12-08 08:06:28 +00:00
|
|
|
using namespace llvm;
|
2003-11-11 22:41:34 +00:00
|
|
|
|
2006-12-19 22:43:32 +00:00
|
|
|
// JIT code-size and relocation statistics, reported via -stats.
STATISTIC(NumBytes, "Number of bytes of machine code compiled");
STATISTIC(NumRelos, "Number of relocations applied");

// TheJIT - The single active JIT instance for this process.  Set by the
// JITResolver constructor below and read by the free functions in this file.
static JIT *TheJIT = 0;
|
2002-12-24 00:01:05 +00:00
|
|
|
|
2005-07-29 23:40:16 +00:00
|
|
|
|
2004-11-20 23:57:07 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// JIT lazy compilation code.
|
|
|
|
//
|
|
|
|
namespace {
|
2005-07-12 15:51:55 +00:00
|
|
|
/// JITResolverState - Lock-protected bookkeeping shared by the JITResolver:
/// which stub was created for which function (and vice versa), and which lazy
/// pointer was created for which global.
class JITResolverState {
private:
  /// FunctionToStubMap - Keep track of the stub created for a particular
  /// function so that we can reuse them if necessary.
  std::map<Function*, void*> FunctionToStubMap;

  /// StubToFunctionMap - Keep track of the function that each stub
  /// corresponds to.
  std::map<void*, Function*> StubToFunctionMap;

  /// GlobalToLazyPtrMap - Keep track of the lazy pointer created for a
  /// particular GlobalVariable so that we can reuse them if necessary.
  std::map<GlobalValue*, void*> GlobalToLazyPtrMap;

public:
  // Each accessor requires evidence (a MutexGuard) that the caller holds the
  // JIT's lock, making it hard to touch these maps unsynchronized.
  std::map<Function*, void*>& getFunctionToStubMap(const MutexGuard& locked) {
    assert(locked.holds(TheJIT->lock));
    return FunctionToStubMap;
  }

  std::map<void*, Function*>& getStubToFunctionMap(const MutexGuard& locked) {
    assert(locked.holds(TheJIT->lock));
    return StubToFunctionMap;
  }

  std::map<GlobalValue*, void*>&
  getGlobalToLazyPtrMap(const MutexGuard& locked) {
    assert(locked.holds(TheJIT->lock));
    return GlobalToLazyPtrMap;
  }
};
|
2005-07-27 06:12:32 +00:00
|
|
|
|
2004-11-20 23:57:07 +00:00
|
|
|
/// JITResolver - Keep track of, and resolve, call sites for functions that
/// have not yet been compiled.
class JITResolver {
  /// LazyResolverFn - The target lazy resolver function that we actually
  /// rewrite instructions to use.
  TargetJITInfo::LazyResolverFn LazyResolverFn;

  // state - The stub/function maps, guarded by TheJIT->lock.
  JITResolverState state;

  /// ExternalFnToStubMap - This is the equivalent of FunctionToStubMap for
  /// external functions.
  std::map<void*, void*> ExternalFnToStubMap;

  //map addresses to indexes in the GOT
  std::map<void*, unsigned> revGOTMap;
  unsigned nextGOTIndex;

  // TheJITResolver - The process-wide singleton; asserted unique below.
  static JITResolver *TheJITResolver;
public:
  explicit JITResolver(JIT &jit) : nextGOTIndex(0) {
    TheJIT = &jit;

    // Ask the target which resolver entry point stubs should branch to, and
    // hand it our static callback so it can call back into us.
    LazyResolverFn = jit.getJITInfo().getLazyResolverFunction(JITCompilerFn);
    assert(TheJITResolver == 0 && "Multiple JIT resolvers?");
    TheJITResolver = this;
  }

  ~JITResolver() {
    TheJITResolver = 0;
  }

  /// getFunctionStub - This returns a pointer to a function stub, creating
  /// one on demand as needed.
  void *getFunctionStub(Function *F);

  /// getExternalFunctionStub - Return a stub for the function at the
  /// specified address, created lazily on demand.
  void *getExternalFunctionStub(void *FnAddr);

  /// getGlobalValueLazyPtr - Return a lazy pointer containing the specified
  /// GV address.
  void *getGlobalValueLazyPtr(GlobalValue *V, void *GVAddress);

  /// AddCallbackAtLocation - If the target is capable of rewriting an
  /// instruction without the use of a stub, record the location of the use so
  /// we know which function is being used at the location.
  void *AddCallbackAtLocation(Function *F, void *Location) {
    MutexGuard locked(TheJIT->lock);
    /// Get the target-specific JIT resolver function.
    state.getStubToFunctionMap(locked)[Location] = F;
    return (void*)(intptr_t)LazyResolverFn;
  }

  /// getGOTIndexForAddress - Return a new or existing index in the GOT for
  /// an address.  This function only manages slots, it does not manage the
  /// contents of the slots or the memory associated with the GOT.
  unsigned getGOTIndexForAddr(void *addr);

  /// JITCompilerFn - This function is called to resolve a stub to a compiled
  /// address.  If the LLVM Function corresponding to the stub has not yet
  /// been compiled, this function compiles it first.
  static void *JITCompilerFn(void *Stub);
};
|
|
|
|
}
|
|
|
|
|
2007-02-24 02:57:03 +00:00
|
|
|
// Out-of-line definition of the resolver singleton; null until a JITResolver
// is constructed.
JITResolver *JITResolver::TheJITResolver = 0;
|
2004-11-20 23:57:07 +00:00
|
|
|
|
2006-07-27 06:33:55 +00:00
|
|
|
// Darwin/PowerPC exposes an OS routine for flushing the instruction cache,
// but it has no public header, so declare it here.
#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
    defined(__APPLE__)
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#endif

/// synchronizeICache - On some targets, the JIT emitted code must be
/// explicitly refetched to ensure correct execution.
static void synchronizeICache(const void *Addr, size_t len) {
#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
    defined(__APPLE__)
  sys_icache_invalidate(Addr, len);
  // On every other configuration this is intentionally a no-op.
#endif
}
|
|
|
|
|
2004-11-20 23:57:07 +00:00
|
|
|
/// getFunctionStub - This returns a pointer to a function stub, creating
|
|
|
|
/// one on demand as needed.
|
|
|
|
void *JITResolver::getFunctionStub(Function *F) {
|
2005-07-12 15:51:55 +00:00
|
|
|
MutexGuard locked(TheJIT->lock);
|
|
|
|
|
2004-11-20 23:57:07 +00:00
|
|
|
// If we already have a stub for this function, recycle it.
|
2005-07-12 15:51:55 +00:00
|
|
|
void *&Stub = state.getFunctionToStubMap(locked)[F];
|
2004-11-20 23:57:07 +00:00
|
|
|
if (Stub) return Stub;
|
|
|
|
|
2004-11-22 07:24:43 +00:00
|
|
|
// Call the lazy resolver function unless we already KNOW it is an external
|
|
|
|
// function, in which case we just skip the lazy resolution step.
|
2006-06-01 17:29:22 +00:00
|
|
|
void *Actual = (void*)(intptr_t)LazyResolverFn;
|
2007-07-05 17:07:56 +00:00
|
|
|
if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode())
|
2004-11-22 07:24:43 +00:00
|
|
|
Actual = TheJIT->getPointerToFunction(F);
|
2005-04-21 22:55:34 +00:00
|
|
|
|
2004-11-20 23:57:07 +00:00
|
|
|
// Otherwise, codegen a new stub. For now, the stub will call the lazy
|
|
|
|
// resolver function.
|
2008-04-16 20:46:05 +00:00
|
|
|
Stub = TheJIT->getJITInfo().emitFunctionStub(F, Actual,
|
2007-02-24 02:57:03 +00:00
|
|
|
*TheJIT->getCodeEmitter());
|
2004-11-22 07:24:43 +00:00
|
|
|
|
2006-06-01 17:29:22 +00:00
|
|
|
if (Actual != (void*)(intptr_t)LazyResolverFn) {
|
2004-11-22 07:24:43 +00:00
|
|
|
// If we are getting the stub for an external function, we really want the
|
|
|
|
// address of the stub in the GlobalAddressMap for the JIT, not the address
|
|
|
|
// of the external function.
|
|
|
|
TheJIT->updateGlobalMapping(F, Stub);
|
|
|
|
}
|
2004-11-20 23:57:07 +00:00
|
|
|
|
2006-12-07 20:04:42 +00:00
|
|
|
DOUT << "JIT: Stub emitted at [" << Stub << "] for function '"
|
|
|
|
<< F->getName() << "'\n";
|
2004-11-21 03:44:32 +00:00
|
|
|
|
2004-11-20 23:57:07 +00:00
|
|
|
// Finally, keep track of the stub-to-Function mapping so that the
|
|
|
|
// JITCompilerFn knows which function to compile!
|
2005-07-12 15:51:55 +00:00
|
|
|
state.getStubToFunctionMap(locked)[Stub] = F;
|
2004-11-20 23:57:07 +00:00
|
|
|
return Stub;
|
|
|
|
}
|
|
|
|
|
2008-01-04 10:46:51 +00:00
|
|
|
/// getGlobalValueLazyPtr - Return a lazy pointer containing the specified
|
|
|
|
/// GV address.
|
|
|
|
void *JITResolver::getGlobalValueLazyPtr(GlobalValue *GV, void *GVAddress) {
|
|
|
|
MutexGuard locked(TheJIT->lock);
|
|
|
|
|
|
|
|
// If we already have a stub for this global variable, recycle it.
|
|
|
|
void *&LazyPtr = state.getGlobalToLazyPtrMap(locked)[GV];
|
|
|
|
if (LazyPtr) return LazyPtr;
|
|
|
|
|
|
|
|
// Otherwise, codegen a new lazy pointer.
|
2008-04-16 20:46:05 +00:00
|
|
|
LazyPtr = TheJIT->getJITInfo().emitGlobalValueLazyPtr(GV, GVAddress,
|
2008-01-04 10:46:51 +00:00
|
|
|
*TheJIT->getCodeEmitter());
|
|
|
|
|
|
|
|
DOUT << "JIT: Stub emitted at [" << LazyPtr << "] for GV '"
|
|
|
|
<< GV->getName() << "'\n";
|
|
|
|
|
|
|
|
return LazyPtr;
|
|
|
|
}
|
|
|
|
|
2005-04-18 01:44:27 +00:00
|
|
|
/// getExternalFunctionStub - Return a stub for the function at the
|
|
|
|
/// specified address, created lazily on demand.
|
|
|
|
void *JITResolver::getExternalFunctionStub(void *FnAddr) {
|
|
|
|
// If we already have a stub for this function, recycle it.
|
|
|
|
void *&Stub = ExternalFnToStubMap[FnAddr];
|
|
|
|
if (Stub) return Stub;
|
|
|
|
|
2008-04-16 20:46:05 +00:00
|
|
|
Stub = TheJIT->getJITInfo().emitFunctionStub(0, FnAddr,
|
2007-02-24 02:57:03 +00:00
|
|
|
*TheJIT->getCodeEmitter());
|
2006-07-25 20:40:54 +00:00
|
|
|
|
2006-12-07 20:04:42 +00:00
|
|
|
DOUT << "JIT: Stub emitted at [" << Stub
|
|
|
|
<< "] for external function at '" << FnAddr << "'\n";
|
2005-04-18 01:44:27 +00:00
|
|
|
return Stub;
|
|
|
|
}
|
|
|
|
|
2005-07-28 12:44:13 +00:00
|
|
|
unsigned JITResolver::getGOTIndexForAddr(void* addr) {
|
|
|
|
unsigned idx = revGOTMap[addr];
|
|
|
|
if (!idx) {
|
|
|
|
idx = ++nextGOTIndex;
|
|
|
|
revGOTMap[addr] = idx;
|
2008-04-12 00:22:01 +00:00
|
|
|
DOUT << "Adding GOT entry " << idx << " for addr " << addr << "\n";
|
2005-07-28 12:44:13 +00:00
|
|
|
}
|
|
|
|
return idx;
|
|
|
|
}
|
2005-04-18 01:44:27 +00:00
|
|
|
|
2004-11-20 23:57:07 +00:00
|
|
|
/// JITCompilerFn - This function is called when a lazy compilation stub has
/// been entered.  It looks up which function this stub corresponds to, compiles
/// it if necessary, then returns the resultant function pointer.
void *JITResolver::JITCompilerFn(void *Stub) {
  JITResolver &JR = *TheJITResolver;

  // Hold the JIT lock for the whole resolution so concurrent callers of the
  // same stub serialize on the compile.
  MutexGuard locked(TheJIT->lock);

  // The address given to us for the stub may not be exactly right, it might be
  // a little bit after the stub.  As such, use upper_bound to find it.
  std::map<void*, Function*>::iterator I =
    JR.state.getStubToFunctionMap(locked).upper_bound(Stub);
  assert(I != JR.state.getStubToFunctionMap(locked).begin() &&
         "This is not a known stub!");
  Function *F = (--I)->second;

  // If we have already code generated the function, just return the address.
  void *Result = TheJIT->getPointerToGlobalIfAvailable(F);

  if (!Result) {
    // Otherwise we don't have it, do lazy compilation now.

    // If lazy compilation is disabled, emit a useful error message and abort.
    if (TheJIT->isLazyCompilationDisabled()) {
      cerr << "LLVM JIT requested to do lazy compilation of function '"
           << F->getName() << "' when lazy compiles are disabled!\n";
      abort();
    }

    // We might like to remove the stub from the StubToFunction map.
    // We can't do that! Multiple threads could be stuck, waiting to acquire the
    // lock above. As soon as the 1st function finishes compiling the function,
    // the next one will be released, and needs to be able to find the function
    // it needs to call.
    //JR.state.getStubToFunctionMap(locked).erase(I);

    DOUT << "JIT: Lazily resolving function '" << F->getName()
         << "' In stub ptr = " << Stub << " actual ptr = "
         << I->first << "\n";

    Result = TheJIT->getPointerToFunction(F);
  }

  // We don't need to reuse this stub in the future, as F is now compiled.
  JR.state.getFunctionToStubMap(locked).erase(F);

  // FIXME: We could rewrite all references to this stub if we knew them.

  // What we will do is set the compiled function address to map to the
  // same GOT entry as the stub so that later clients may update the GOT
  // if they see it still using the stub address.
  // Note: this is done so the Resolver doesn't have to manage GOT memory
  // Do this without allocating map space if the target isn't using a GOT
  if(JR.revGOTMap.find(Stub) != JR.revGOTMap.end())
    JR.revGOTMap[Result] = JR.revGOTMap[Stub];

  return Result;
}
|
|
|
|
|
2008-04-04 05:51:42 +00:00
|
|
|
//===----------------------------------------------------------------------===//
// Function Index Support

// On MacOS we generate an index of currently JIT'd functions so that
// performance tools can determine a symbol name and accurate code range for a
// PC value.  Because performance tools are generally asynchronous, the code
// below is written with the hope that it could be interrupted at any time and
// have useful answers.  However, we don't go crazy with atomic operations, we
// just do a "reasonable effort".
#ifdef __APPLE__
// NOTE: currently defined to 0, so the symbol-table machinery below compiles
// out even on Darwin.
#define ENABLE_JIT_SYMBOL_TABLE 0
#endif

/// JitSymbolEntry - Each function that is JIT compiled results in one of these
/// being added to an array of symbols.  This indicates the name of the function
/// as well as the address range it occupies.  This allows the client to map
/// from a PC value to the name of the function.
struct JitSymbolEntry {
  const char *FnName;   // FnName - a strdup'd string.
  void *FnStart;
  intptr_t FnSize;
};
|
|
|
|
|
|
|
|
|
|
|
|
struct JitSymbolTable {
  /// NextPtr - This forms a linked list of JitSymbolTable entries.  This
  /// pointer is not used right now, but might be used in the future.  Consider
  /// it reserved for future use.
  JitSymbolTable *NextPtr;

  /// Symbols - This is an array of JitSymbolEntry entries.  Only the first
  /// 'NumSymbols' symbols are valid.
  JitSymbolEntry *Symbols;

  /// NumSymbols - This indicates the number entries in the Symbols array that
  /// are valid.
  unsigned NumSymbols;

  /// NumAllocated - This indicates the amount of space we have in the Symbols
  /// array.  This is a private field that should not be read by external tools.
  unsigned NumAllocated;
};

#if ENABLE_JIT_SYMBOL_TABLE
// __jitSymbolTable - The head of the process-wide symbol index; external
// tools find it by this well-known name.
JitSymbolTable *__jitSymbolTable;
#endif
|
|
|
|
|
|
|
|
static void AddFunctionToSymbolTable(const char *FnName,
|
|
|
|
void *FnStart, intptr_t FnSize) {
|
|
|
|
assert(FnName != 0 && FnStart != 0 && "Bad symbol to add");
|
|
|
|
JitSymbolTable **SymTabPtrPtr = 0;
|
|
|
|
#if !ENABLE_JIT_SYMBOL_TABLE
|
|
|
|
return;
|
|
|
|
#else
|
|
|
|
SymTabPtrPtr = &__jitSymbolTable;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// If this is the first entry in the symbol table, add the JitSymbolTable
|
|
|
|
// index.
|
|
|
|
if (*SymTabPtrPtr == 0) {
|
|
|
|
JitSymbolTable *New = new JitSymbolTable();
|
|
|
|
New->NextPtr = 0;
|
|
|
|
New->Symbols = 0;
|
|
|
|
New->NumSymbols = 0;
|
|
|
|
New->NumAllocated = 0;
|
|
|
|
*SymTabPtrPtr = New;
|
|
|
|
}
|
|
|
|
|
|
|
|
JitSymbolTable *SymTabPtr = *SymTabPtrPtr;
|
|
|
|
|
|
|
|
// If we have space in the table, reallocate the table.
|
|
|
|
if (SymTabPtr->NumSymbols >= SymTabPtr->NumAllocated) {
|
|
|
|
// If we don't have space, reallocate the table.
|
2008-04-13 07:04:56 +00:00
|
|
|
unsigned NewSize = std::max(64U, SymTabPtr->NumAllocated*2);
|
2008-04-04 05:51:42 +00:00
|
|
|
JitSymbolEntry *NewSymbols = new JitSymbolEntry[NewSize];
|
|
|
|
JitSymbolEntry *OldSymbols = SymTabPtr->Symbols;
|
|
|
|
|
|
|
|
// Copy the old entries over.
|
|
|
|
memcpy(NewSymbols, OldSymbols,
|
2008-04-13 07:04:56 +00:00
|
|
|
SymTabPtr->NumSymbols*sizeof(OldSymbols[0]));
|
2008-04-04 05:51:42 +00:00
|
|
|
|
|
|
|
// Swap the new symbols in, delete the old ones.
|
|
|
|
SymTabPtr->Symbols = NewSymbols;
|
2008-04-13 07:04:56 +00:00
|
|
|
SymTabPtr->NumAllocated = NewSize;
|
2008-04-04 05:51:42 +00:00
|
|
|
delete [] OldSymbols;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we have enough space, just tack it onto the end of the array.
|
|
|
|
JitSymbolEntry &Entry = SymTabPtr->Symbols[SymTabPtr->NumSymbols];
|
|
|
|
Entry.FnName = strdup(FnName);
|
|
|
|
Entry.FnStart = FnStart;
|
|
|
|
Entry.FnSize = FnSize;
|
|
|
|
++SymTabPtr->NumSymbols;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void RemoveFunctionFromSymbolTable(void *FnStart) {
|
|
|
|
assert(FnStart && "Invalid function pointer");
|
|
|
|
JitSymbolTable **SymTabPtrPtr = 0;
|
|
|
|
#if !ENABLE_JIT_SYMBOL_TABLE
|
|
|
|
return;
|
|
|
|
#else
|
|
|
|
SymTabPtrPtr = &__jitSymbolTable;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
JitSymbolTable *SymTabPtr = *SymTabPtrPtr;
|
|
|
|
JitSymbolEntry *Symbols = SymTabPtr->Symbols;
|
|
|
|
|
|
|
|
// Scan the table to find its index. The table is not sorted, so do a linear
|
|
|
|
// scan.
|
|
|
|
unsigned Index;
|
|
|
|
for (Index = 0; Symbols[Index].FnStart != FnStart; ++Index)
|
|
|
|
assert(Index != SymTabPtr->NumSymbols && "Didn't find function!");
|
|
|
|
|
|
|
|
// Once we have an index, we know to nuke this entry, overwrite it with the
|
|
|
|
// entry at the end of the array, making the last entry redundant.
|
|
|
|
const char *OldName = Symbols[Index].FnName;
|
|
|
|
Symbols[Index] = Symbols[SymTabPtr->NumSymbols-1];
|
|
|
|
free((void*)OldName);
|
|
|
|
|
|
|
|
// Drop the number of symbols in the table.
|
|
|
|
--SymTabPtr->NumSymbols;
|
|
|
|
|
|
|
|
// Finally, if we deleted the final symbol, deallocate the table itself.
|
2008-05-18 19:09:10 +00:00
|
|
|
if (SymTabPtr->NumSymbols != 0)
|
2008-04-04 05:51:42 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
*SymTabPtrPtr = 0;
|
|
|
|
delete [] Symbols;
|
|
|
|
delete SymTabPtr;
|
|
|
|
}
|
2004-11-20 23:57:07 +00:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
2004-11-22 22:00:25 +00:00
|
|
|
// JITEmitter code.
|
2004-11-20 23:57:07 +00:00
|
|
|
//
|
2003-08-14 18:35:27 +00:00
|
|
|
namespace {
|
2004-11-22 22:00:25 +00:00
|
|
|
/// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
|
|
|
|
/// used to output functions to memory for execution.
|
|
|
|
class JITEmitter : public MachineCodeEmitter {
|
2007-12-05 23:39:57 +00:00
|
|
|
JITMemoryManager *MemMgr;
|
2003-08-14 18:35:27 +00:00
|
|
|
|
2003-05-09 03:30:07 +00:00
|
|
|
// When outputting a function stub in the context of some other function, we
|
2006-05-02 18:27:26 +00:00
|
|
|
// save BufferBegin/BufferEnd/CurBufferPtr here.
|
|
|
|
unsigned char *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;
|
2003-06-01 23:24:36 +00:00
|
|
|
|
2004-11-20 03:46:14 +00:00
|
|
|
/// Relocations - These are the relocations that the function needs, as
|
|
|
|
/// emitted.
|
|
|
|
std::vector<MachineRelocation> Relocations;
|
2006-05-03 17:10:41 +00:00
|
|
|
|
|
|
|
/// MBBLocations - This vector is a mapping from MBB ID's to their address.
|
|
|
|
/// It is filled in by the StartMachineBasicBlock callback and queried by
|
|
|
|
/// the getMachineBasicBlockAddress callback.
|
|
|
|
std::vector<intptr_t> MBBLocations;
|
2005-07-22 20:48:12 +00:00
|
|
|
|
2006-02-09 04:49:59 +00:00
|
|
|
/// ConstantPool - The constant pool for the current function.
|
|
|
|
///
|
|
|
|
MachineConstantPool *ConstantPool;
|
|
|
|
|
|
|
|
/// ConstantPoolBase - A pointer to the first entry in the constant pool.
|
|
|
|
///
|
|
|
|
void *ConstantPoolBase;
|
2006-04-22 18:53:45 +00:00
|
|
|
|
2006-09-10 23:03:44 +00:00
|
|
|
/// JumpTable - The jump tables for the current function.
|
2006-04-22 18:53:45 +00:00
|
|
|
///
|
|
|
|
MachineJumpTableInfo *JumpTable;
|
|
|
|
|
|
|
|
/// JumpTableBase - A pointer to the first entry in the jump table.
|
|
|
|
///
|
|
|
|
void *JumpTableBase;
|
2008-01-05 02:26:58 +00:00
|
|
|
|
2007-02-24 02:57:03 +00:00
|
|
|
/// Resolver - This contains info about the currently resolved functions.
|
|
|
|
JITResolver Resolver;
|
2008-02-13 18:39:37 +00:00
|
|
|
|
|
|
|
/// DE - The dwarf emitter for the jit.
|
|
|
|
JITDwarfEmitter *DE;
|
|
|
|
|
|
|
|
/// LabelLocations - This vector is a mapping from Label ID's to their
|
|
|
|
/// address.
|
|
|
|
std::vector<intptr_t> LabelLocations;
|
|
|
|
|
|
|
|
/// MMI - Machine module info for exception informations
|
|
|
|
MachineModuleInfo* MMI;
|
|
|
|
|
2007-02-24 02:57:03 +00:00
|
|
|
public:
|
2007-12-06 01:08:09 +00:00
|
|
|
JITEmitter(JIT &jit, JITMemoryManager *JMM) : Resolver(jit) {
|
|
|
|
MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
|
2007-12-05 23:39:57 +00:00
|
|
|
if (jit.getJITInfo().needsGOT()) {
|
|
|
|
MemMgr->AllocateGOT();
|
|
|
|
DOUT << "JIT is managing a GOT\n";
|
|
|
|
}
|
2008-02-13 18:39:37 +00:00
|
|
|
|
|
|
|
if (ExceptionHandling) DE = new JITDwarfEmitter(jit);
|
2007-12-05 23:39:57 +00:00
|
|
|
}
|
|
|
|
~JITEmitter() {
|
|
|
|
delete MemMgr;
|
2008-02-13 18:39:37 +00:00
|
|
|
if (ExceptionHandling) delete DE;
|
2005-07-22 20:48:12 +00:00
|
|
|
}
|
2007-02-24 02:57:03 +00:00
|
|
|
|
|
|
|
JITResolver &getJITResolver() { return Resolver; }
|
2002-12-24 00:01:05 +00:00
|
|
|
|
|
|
|
virtual void startFunction(MachineFunction &F);
|
2006-05-02 18:27:26 +00:00
|
|
|
virtual bool finishFunction(MachineFunction &F);
|
2006-05-02 23:22:24 +00:00
|
|
|
|
|
|
|
void emitConstantPool(MachineConstantPool *MCP);
|
|
|
|
void initJumpTableInfo(MachineJumpTableInfo *MJTI);
|
2006-12-14 22:53:42 +00:00
|
|
|
void emitJumpTableInfo(MachineJumpTableInfo *MJTI);
|
2006-05-02 23:22:24 +00:00
|
|
|
|
2008-04-16 20:46:05 +00:00
|
|
|
virtual void startFunctionStub(const GlobalValue* F, unsigned StubSize,
|
|
|
|
unsigned Alignment = 1);
|
|
|
|
virtual void* finishFunctionStub(const GlobalValue *F);
|
2003-06-01 23:24:36 +00:00
|
|
|
|
2004-11-20 03:46:14 +00:00
|
|
|
virtual void addRelocation(const MachineRelocation &MR) {
|
|
|
|
Relocations.push_back(MR);
|
|
|
|
}
|
2006-05-03 17:10:41 +00:00
|
|
|
|
|
|
|
virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
|
|
|
|
if (MBBLocations.size() <= (unsigned)MBB->getNumber())
|
|
|
|
MBBLocations.resize((MBB->getNumber()+1)*2);
|
|
|
|
MBBLocations[MBB->getNumber()] = getCurrentPCValue();
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual intptr_t getConstantPoolEntryAddress(unsigned Entry) const;
|
|
|
|
virtual intptr_t getJumpTableEntryAddress(unsigned Entry) const;
|
2008-01-05 02:26:58 +00:00
|
|
|
|
2006-05-03 17:10:41 +00:00
|
|
|
virtual intptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const {
|
|
|
|
assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
|
|
|
|
MBBLocations[MBB->getNumber()] && "MBB not emitted!");
|
|
|
|
return MBBLocations[MBB->getNumber()];
|
|
|
|
}
|
2004-11-20 03:46:14 +00:00
|
|
|
|
2006-05-11 23:08:08 +00:00
|
|
|
/// deallocateMemForFunction - Deallocate all memory for the specified
|
|
|
|
/// function body.
|
|
|
|
void deallocateMemForFunction(Function *F) {
|
2007-12-05 23:39:57 +00:00
|
|
|
MemMgr->deallocateMemForFunction(F);
|
2006-05-11 23:08:08 +00:00
|
|
|
}
|
2008-02-13 18:39:37 +00:00
|
|
|
|
|
|
|
virtual void emitLabel(uint64_t LabelID) {
|
|
|
|
if (LabelLocations.size() <= LabelID)
|
|
|
|
LabelLocations.resize((LabelID+1)*2);
|
|
|
|
LabelLocations[LabelID] = getCurrentPCValue();
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual intptr_t getLabelAddress(uint64_t LabelID) const {
|
|
|
|
assert(LabelLocations.size() > (unsigned)LabelID &&
|
|
|
|
LabelLocations[LabelID] && "Label not emitted!");
|
|
|
|
return LabelLocations[LabelID];
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual void setModuleInfo(MachineModuleInfo* Info) {
|
|
|
|
MMI = Info;
|
|
|
|
if (ExceptionHandling) DE->setModuleInfo(Info);
|
|
|
|
}
|
|
|
|
|
2004-11-20 23:57:07 +00:00
|
|
|
private:
|
2004-11-21 03:37:42 +00:00
|
|
|
void *getPointerToGlobal(GlobalValue *GV, void *Reference, bool NoNeedStub);
|
2008-01-04 10:46:51 +00:00
|
|
|
void *getPointerToGVLazyPtr(GlobalValue *V, void *Reference,
|
|
|
|
bool NoNeedStub);
|
2002-12-24 00:01:05 +00:00
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2004-11-22 22:00:25 +00:00
|
|
|
void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
                                     bool DoesntNeedStub) {
  // Global variables need no stub machinery; emit or fetch them directly.
  /// FIXME: If we straightened things out, this could actually emit the
  /// global immediately instead of queuing it for codegen later!
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return TheJIT->getOrEmitGlobalVariable(GV);

  // If we have already compiled the function, return a pointer to its body.
  Function *F = cast<Function>(V);
  if (void *Known = TheJIT->getPointerToGlobalIfAvailable(F))
    return Known;

  if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode()) {
    // External function: we can force the JIT to 'compile' it (which really
    // just adds it to the map) when no stub is required, otherwise hand out
    // a stub for it.
    return DoesntNeedStub ? TheJIT->getPointerToFunction(F)
                          : Resolver.getFunctionStub(F);
  }

  // The function has not been compiled yet; if the target callback mechanism
  // can rewrite the instruction directly, prefer that over emitting a stub.
  if (DoesntNeedStub)
    return Resolver.AddCallbackAtLocation(F, Reference);

  // Otherwise, we have to emit a lazy resolving stub.
  return Resolver.getFunctionStub(F);
}
|
|
|
|
|
2008-01-04 10:46:51 +00:00
|
|
|
void *JITEmitter::getPointerToGVLazyPtr(GlobalValue *V, void *Reference,
                                        bool DoesntNeedStub) {
  // Make sure GV is emitted first.
  // FIXME: For now, if the GV is an external function we force the JIT to
  // compile it so the lazy pointer will contain the fully resolved address.
  void *ResolvedAddr = getPointerToGlobal(V, Reference, true);
  return Resolver.getGlobalValueLazyPtr(V, ResolvedAddr);
}
|
|
|
|
|
2008-04-18 20:59:31 +00:00
|
|
|
/// GetConstantPoolSizeInBytes - Compute the total byte size of a function's
/// constant pool: the last entry's offset plus its type's ABI size.
static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP) {
  const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
  if (Constants.empty()) return 0;

  // Entries are laid out in order, so the pool ends where the final entry
  // ends.
  MachineConstantPoolEntry Last = Constants.back();
  const Type *Ty = Last.isMachineConstantPoolEntry()
                     ? Last.Val.MachineCPVal->getType()
                     : Last.Val.ConstVal->getType();
  return Last.Offset + TheJIT->getTargetData()->getABITypeSize(Ty);
}
|
|
|
|
|
|
|
|
/// GetJumpTableSizeInBytes - Compute the total byte size of a function's jump
/// tables: the number of destination MBBs across all tables times the size of
/// one table entry.
static unsigned GetJumpTableSizeInBytes(MachineJumpTableInfo *MJTI) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty()) return 0;

  unsigned TotalEntries = 0;
  for (unsigned i = 0, e = JT.size(); i != e; ++i)
    TotalEntries += JT[i].MBBs.size();

  return TotalEntries * MJTI->getEntrySize();
}
|
|
|
|
|
2008-04-20 23:39:44 +00:00
|
|
|
/// RoundUpToAlign - Conservatively account for alignment padding: because the
/// final buffer address is not yet known, assume a full extra Alignment bytes
/// may be consumed by padding.
static uintptr_t RoundUpToAlign(uintptr_t Size, unsigned Alignment) {
  // Treat "no alignment requested" as byte alignment.
  if (!Alignment)
    Alignment = 1;
  // Since we do not know where the buffer will be allocated, be pessimistic.
  return Size + Alignment;
}
|
2008-01-04 10:46:51 +00:00
|
|
|
|
2004-11-22 22:00:25 +00:00
|
|
|
/// startFunction - Begin emitting machine code for F.  Allocates the output
/// buffer (sizing it exactly when the memory manager requires that), emits
/// the constant pool, reserves jump-table space, and records the function's
/// eventual entry address in the global mapping.
void JITEmitter::startFunction(MachineFunction &F) {
  uintptr_t ActualSize = 0;

  // If the memory manager demands an exact size up front, pessimistically
  // total up every per-function chunk we are about to emit, in emission
  // order: pool alignment, pool, jump-table alignment, jump tables,
  // function alignment, machine code.
  if (MemMgr->NeedsExactSize()) {
    const TargetInstrInfo *TII = F.getTarget().getInstrInfo();
    MachineJumpTableInfo *MJTI = F.getJumpTableInfo();
    MachineConstantPool *MCP = F.getConstantPool();

    // Ensure the constant pool/jump table info is at least 4-byte aligned.
    ActualSize = RoundUpToAlign(ActualSize, 16);

    // Constant pool: alignment padding plus the pool itself.  Note that
    // getConstantPoolAlignment() returns a log2 value.
    ActualSize = RoundUpToAlign(ActualSize,
                                1 << MCP->getConstantPoolAlignment());
    ActualSize += GetConstantPoolSizeInBytes(MCP);

    // Jump tables: alignment padding plus the tables themselves.
    ActualSize = RoundUpToAlign(ActualSize, MJTI->getAlignment());
    ActualSize += GetJumpTableSizeInBytes(MJTI);

    // Finally the machine code itself, aligned for the function.
    ActualSize = RoundUpToAlign(ActualSize,
                                std::max(F.getFunction()->getAlignment(), 8U));
    ActualSize += TII->GetFunctionSizeInBytes(F);
  }

  BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(),
                                                         ActualSize);
  BufferEnd = BufferBegin+ActualSize;

  // Ensure the constant pool/jump table info is at least 4-byte aligned.
  emitAlignment(16);

  emitConstantPool(F.getConstantPool());
  initJumpTableInfo(F.getJumpTableInfo());

  // About to start emitting the machine code for the function.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
  TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr);

  MBBLocations.clear();
}
|
|
|
|
|
2006-05-02 18:27:26 +00:00
|
|
|
/// finishFunction - Finalize the code emitted for F: flush the jump tables,
/// resolve all recorded relocations, keep the GOT coherent, sync the icache,
/// publish the symbol, and (when exception handling is on) emit the DWARF
/// table into a separate buffer.  Returns false (emission never retries).
bool JITEmitter::finishFunction(MachineFunction &F) {
  // CurBufferPtr hitting BufferEnd means the emitter clamped writes: the
  // generated code is truncated and unusable.
  if (CurBufferPtr == BufferEnd) {
    // FIXME: Allocate more space, then try again.
    cerr << "JIT: Ran out of space for generated machine code!\n";
    abort();
  }

  // Now that every MBB address is known, fill in the jump table slots that
  // initJumpTableInfo reserved.
  emitJumpTableInfo(F.getJumpTableInfo());

  // FnStart is the start of the text, not the start of the constant pool and
  // other per-function data.
  unsigned char *FnStart =
    (unsigned char *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction());
  unsigned char *FnEnd = CurBufferPtr;

  MemMgr->endFunctionBody(F.getFunction(), BufferBegin, FnEnd);
  NumBytes += FnEnd-FnStart;

  if (!Relocations.empty()) {
    NumRelos += Relocations.size();

    // Resolve the relocations to concrete pointers.
    for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
      MachineRelocation &MR = Relocations[i];
      void *ResultPtr;
      if (MR.isString()) {
        // External symbol referenced by name.
        ResultPtr = TheJIT->getPointerToNamedFunction(MR.getString());

        // If the target REALLY wants a stub for this function, emit it now.
        if (!MR.doesntNeedStub())
          ResultPtr = Resolver.getExternalFunctionStub(ResultPtr);
      } else if (MR.isGlobalValue()) {
        ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
                                       BufferBegin+MR.getMachineCodeOffset(),
                                       MR.doesntNeedStub());
      } else if (MR.isGlobalValueLazyPtr()) {
        ResultPtr = getPointerToGVLazyPtr(MR.getGlobalValue(),
                                          BufferBegin+MR.getMachineCodeOffset(),
                                          MR.doesntNeedStub());
      } else if (MR.isBasicBlock()) {
        ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock());
      } else if (MR.isConstantPoolIndex()) {
        ResultPtr=(void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
      } else {
        assert(MR.isJumpTableIndex());
        ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex());
      }

      MR.setResultPointer(ResultPtr);

      // If we are managing the GOT and the relocation wants an index,
      // give it one, and refresh a stale GOT slot in place.
      if (MR.isGOTRelative() && MemMgr->isManagingGOT()) {
        unsigned idx = Resolver.getGOTIndexForAddr(ResultPtr);
        MR.setGOTIndex(idx);
        if (((void**)MemMgr->getGOTBase())[idx] != ResultPtr) {
          DOUT << "GOT was out of date for " << ResultPtr
               << " pointing at " << ((void**)MemMgr->getGOTBase())[idx]
               << "\n";
          ((void**)MemMgr->getGOTBase())[idx] = ResultPtr;
        }
      }
    }

    // Let the target apply the now-resolved relocations to the buffer.
    TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
                                  Relocations.size(), MemMgr->getGOTBase());
  }

  // Update the GOT entry for F to point to the new code.
  if (MemMgr->isManagingGOT()) {
    unsigned idx = Resolver.getGOTIndexForAddr((void*)BufferBegin);
    if (((void**)MemMgr->getGOTBase())[idx] != (void*)BufferBegin) {
      DOUT << "GOT was out of date for " << (void*)BufferBegin
           << " pointing at " << ((void**)MemMgr->getGOTBase())[idx] << "\n";
      ((void**)MemMgr->getGOTBase())[idx] = (void*)BufferBegin;
    }
  }

  // Invalidate the icache if necessary.
  synchronizeICache(FnStart, FnEnd-FnStart);

  // Add it to the JIT symbol table if the host wants it.
  AddFunctionToSymbolTable(F.getFunction()->getNameStart(),
                           FnStart, FnEnd-FnStart);

  DOUT << "JIT: Finished CodeGen of [" << (void*)FnStart
       << "] Function: " << F.getFunction()->getName()
       << ": " << (FnEnd-FnStart) << " bytes of text, "
       << Relocations.size() << " relocations\n";
  Relocations.clear();

#ifndef NDEBUG
  if (sys::hasDisassembler())
    DOUT << "Disassembled code:\n"
         << sys::disassembleBuffer(FnStart, FnEnd-FnStart, (uintptr_t)FnStart);
#endif
  if (ExceptionHandling) {
    // Emit the DWARF exception table into its own buffer, saving and
    // restoring the function-body buffer pointers around it.
    uintptr_t ActualSize = 0;
    SavedBufferBegin = BufferBegin;
    SavedBufferEnd = BufferEnd;
    SavedCurBufferPtr = CurBufferPtr;

    if (MemMgr->NeedsExactSize()) {
      ActualSize = DE->GetDwarfTableSizeInBytes(F, *this, FnStart, FnEnd);
    }

    BufferBegin = CurBufferPtr = MemMgr->startExceptionTable(F.getFunction(),
                                                             ActualSize);
    BufferEnd = BufferBegin+ActualSize;
    unsigned char* FrameRegister = DE->EmitDwarfTable(F, *this, FnStart, FnEnd);
    MemMgr->endExceptionTable(F.getFunction(), BufferBegin, CurBufferPtr,
                              FrameRegister);
    BufferBegin = SavedBufferBegin;
    BufferEnd = SavedBufferEnd;
    CurBufferPtr = SavedCurBufferPtr;

    TheJIT->RegisterTable(FrameRegister);
  }

  MMI->EndFunction();

  return false;
}
|
|
|
|
|
2004-11-22 22:00:25 +00:00
|
|
|
/// emitConstantPool - Allocate buffer space for MCP and copy every plain
/// constant into it.  The base address and pool are remembered so that
/// getConstantPoolEntryAddress can answer queries later.  Machine-specific
/// pool entries are not yet lowerable and abort.
void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
  const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
  if (Constants.empty()) return;

  // Total size = offset of the last entry + ABI storage size of its type.
  const MachineConstantPoolEntry &Tail = Constants.back();
  const Type *TailTy = Tail.isMachineConstantPoolEntry()
                         ? Tail.Val.MachineCPVal->getType()
                         : Tail.Val.ConstVal->getType();
  unsigned Size = Tail.Offset +
                  TheJIT->getTargetData()->getABITypeSize(TailTy);

  // getConstantPoolAlignment() is a log2 value.
  unsigned Align = 1 << MCP->getConstantPoolAlignment();
  ConstantPoolBase = allocateSpace(Size, Align);
  ConstantPool = MCP;

  if (ConstantPoolBase == 0) return; // Buffer overflow.

  DOUT << "JIT: Emitted constant pool at [" << ConstantPoolBase
       << "] (size: " << Size << ", alignment: " << Align << ")\n";

  // Initialize the memory for all of the constant pool entries.
  for (unsigned Idx = 0, End = Constants.size(); Idx != End; ++Idx) {
    void *CAddr = (char*)ConstantPoolBase+Constants[Idx].Offset;
    if (Constants[Idx].isMachineConstantPoolEntry()) {
      // FIXME: add support to lower machine constant pool values into bytes!
      cerr << "Initialize memory with machine specific constant pool entry"
           << " has not been implemented!\n";
      abort();
    }
    TheJIT->InitializeMemory(Constants[Idx].Val.ConstVal, CAddr);
    DOUT << "JIT: CP" << Idx << " at [" << CAddr << "]\n";
  }
}
|
|
|
|
|
2006-04-22 18:53:45 +00:00
|
|
|
/// initJumpTableInfo - Reserve buffer space for all of the function's jump
/// tables.  The slots are filled in later by emitJumpTableInfo, once every
/// MBB's final address is known.
void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty()) return;

  // Total number of destination slots across all tables.
  unsigned TotalSlots = 0;
  for (unsigned t = 0, te = JT.size(); t != te; ++t)
    TotalSlots += JT[t].MBBs.size();

  // Just allocate space for all the jump tables now.  We will fix up the
  // actual MBB entries in the tables after we emit the code for each block,
  // since then we will know the final locations of the MBBs in memory.
  JumpTable = MJTI;
  JumpTableBase = allocateSpace(TotalSlots * MJTI->getEntrySize(),
                                MJTI->getAlignment());
}
|
|
|
|
|
2006-12-14 22:53:42 +00:00
|
|
|
/// emitJumpTableInfo - Fill in the jump table memory reserved by
/// initJumpTableInfo, now that the final address of every machine basic
/// block is known.  PIC builds store 32-bit table-relative entries;
/// non-PIC builds store absolute pointers.
void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty() || JumpTableBase == 0) return;

  if (TargetMachine::getRelocationModel() == Reloc::PIC_) {
    assert(MJTI->getEntrySize() == 4 && "Cross JIT'ing?");
    // For each jump table, place the offset from the beginning of the table
    // to the target address.
    int *CurSlot = (int*)JumpTableBase;

    for (unsigned Tbl = 0, TblEnd = JT.size(); Tbl != TblEnd; ++Tbl) {
      const std::vector<MachineBasicBlock*> &MBBs = JT[Tbl].MBBs;
      // Each table's entries are relative to that table's own start.
      intptr_t Base = (intptr_t)CurSlot;
      for (unsigned S = 0, SE = MBBs.size(); S != SE; ++S) {
        intptr_t MBBAddr = getMachineBasicBlockAddress(MBBs[S]);
        *CurSlot++ = TheJIT->getJITInfo().getPICJumpTableEntry(MBBAddr, Base);
      }
    }
  } else {
    assert(MJTI->getEntrySize() == sizeof(void*) && "Cross JIT'ing?");

    // For each jump table, map each target in the jump table to the address
    // of an emitted MachineBasicBlock.
    intptr_t *CurSlot = (intptr_t*)JumpTableBase;

    for (unsigned Tbl = 0, TblEnd = JT.size(); Tbl != TblEnd; ++Tbl) {
      const std::vector<MachineBasicBlock*> &MBBs = JT[Tbl].MBBs;
      // Store the address of the basic block for this jump table slot in the
      // memory we allocated for the jump table in 'initJumpTableInfo'.
      for (unsigned S = 0, SE = MBBs.size(); S != SE; ++S)
        *CurSlot++ = getMachineBasicBlockAddress(MBBs[S]);
    }
  }
}
|
|
|
|
|
2008-04-16 20:46:05 +00:00
|
|
|
void JITEmitter::startFunctionStub(const GlobalValue* F, unsigned StubSize,
|
|
|
|
unsigned Alignment) {
|
2006-05-02 18:27:26 +00:00
|
|
|
SavedBufferBegin = BufferBegin;
|
|
|
|
SavedBufferEnd = BufferEnd;
|
|
|
|
SavedCurBufferPtr = CurBufferPtr;
|
|
|
|
|
2008-04-16 20:46:05 +00:00
|
|
|
BufferBegin = CurBufferPtr = MemMgr->allocateStub(F, StubSize, Alignment);
|
2006-05-02 18:27:26 +00:00
|
|
|
BufferEnd = BufferBegin+StubSize+1;
|
2003-05-09 03:30:07 +00:00
|
|
|
}
|
|
|
|
|
2008-04-16 20:46:05 +00:00
|
|
|
void *JITEmitter::finishFunctionStub(const GlobalValue* F) {
|
2006-05-02 18:27:26 +00:00
|
|
|
NumBytes += getCurrentPCOffset();
|
|
|
|
std::swap(SavedBufferBegin, BufferBegin);
|
|
|
|
BufferEnd = SavedBufferEnd;
|
|
|
|
CurBufferPtr = SavedCurBufferPtr;
|
|
|
|
return SavedBufferBegin;
|
2003-06-01 23:24:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry
|
|
|
|
// in the constant pool that was last emitted with the 'emitConstantPool'
|
|
|
|
// method.
|
|
|
|
//
|
2006-05-03 17:10:41 +00:00
|
|
|
intptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const {
|
2006-02-09 04:49:59 +00:00
|
|
|
assert(ConstantNum < ConstantPool->getConstants().size() &&
|
2005-04-22 04:08:30 +00:00
|
|
|
"Invalid ConstantPoolIndex!");
|
2006-02-09 04:49:59 +00:00
|
|
|
return (intptr_t)ConstantPoolBase +
|
|
|
|
ConstantPool->getConstants()[ConstantNum].Offset;
|
2003-06-01 23:24:36 +00:00
|
|
|
}
|
|
|
|
|
2006-04-22 18:53:45 +00:00
|
|
|
// getJumpTableEntryAddress - Return the address of the JumpTable with index
|
|
|
|
// 'Index' in the jumpp table that was last initialized with 'initJumpTableInfo'
|
|
|
|
//
|
2006-05-03 17:10:41 +00:00
|
|
|
intptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const {
|
2006-04-22 18:53:45 +00:00
|
|
|
const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
|
|
|
|
assert(Index < JT.size() && "Invalid jump table index!");
|
|
|
|
|
|
|
|
unsigned Offset = 0;
|
|
|
|
unsigned EntrySize = JumpTable->getEntrySize();
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < Index; ++i)
|
2006-12-14 19:17:33 +00:00
|
|
|
Offset += JT[i].MBBs.size();
|
|
|
|
|
|
|
|
Offset *= EntrySize;
|
2006-04-22 18:53:45 +00:00
|
|
|
|
2006-04-25 17:46:32 +00:00
|
|
|
return (intptr_t)((char *)JumpTableBase + Offset);
|
2006-04-22 18:53:45 +00:00
|
|
|
}
|
|
|
|
|
2006-05-11 23:08:08 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Public interface to this file
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2007-12-06 01:08:09 +00:00
|
|
|
/// createEmitter - Factory that builds the JIT's machine code emitter,
/// backed by the given memory manager.
MachineCodeEmitter *JIT::createEmitter(JIT &jit, JITMemoryManager *JMM) {
  return new JITEmitter(jit, JMM);
}
|
|
|
|
|
2003-07-28 19:09:06 +00:00
|
|
|
// getPointerToNamedFunction - This function is used as a global wrapper to
|
2003-12-20 01:46:27 +00:00
|
|
|
// JIT::getPointerToNamedFunction for the purpose of resolving symbols when
|
2003-07-28 19:09:06 +00:00
|
|
|
// bugpoint is debugging the JIT. In that scenario, we are loading an .so and
|
|
|
|
// need to resolve function(s) that are being mis-codegenerated, so we need to
|
|
|
|
// resolve their addresses at runtime, and this is the way to do it.
|
|
|
|
extern "C" {
|
|
|
|
void *getPointerToNamedFunction(const char *Name) {
|
2006-08-16 01:24:12 +00:00
|
|
|
if (Function *F = TheJIT->FindFunctionNamed(Name))
|
2003-12-20 01:46:27 +00:00
|
|
|
return TheJIT->getPointerToFunction(F);
|
|
|
|
return TheJIT->getPointerToNamedFunction(Name);
|
2003-07-28 19:09:06 +00:00
|
|
|
}
|
|
|
|
}
|
2006-05-11 23:08:08 +00:00
|
|
|
|
|
|
|
// getPointerToFunctionOrStub - If the specified function has been
|
|
|
|
// code-gen'd, return a pointer to the function. If not, compile it, or use
|
|
|
|
// a stub to implement lazy compilation if available.
|
|
|
|
//
|
|
|
|
void *JIT::getPointerToFunctionOrStub(Function *F) {
|
|
|
|
// If we have already code generated the function, just return the address.
|
|
|
|
if (void *Addr = getPointerToGlobalIfAvailable(F))
|
|
|
|
return Addr;
|
|
|
|
|
2007-02-24 02:57:03 +00:00
|
|
|
// Get a stub if the target supports it.
|
|
|
|
assert(dynamic_cast<JITEmitter*>(MCE) && "Unexpected MCE?");
|
|
|
|
JITEmitter *JE = static_cast<JITEmitter*>(getCodeEmitter());
|
|
|
|
return JE->getJITResolver().getFunctionStub(F);
|
2006-05-11 23:08:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// freeMachineCodeForFunction - release machine code memory for given Function.
|
|
|
|
///
|
|
|
|
void JIT::freeMachineCodeForFunction(Function *F) {
|
2008-04-04 05:51:42 +00:00
|
|
|
|
2006-05-11 23:08:08 +00:00
|
|
|
// Delete translation for this from the ExecutionEngine, so it will get
|
|
|
|
// retranslated next time it is used.
|
2008-04-04 05:51:42 +00:00
|
|
|
void *OldPtr = updateGlobalMapping(F, 0);
|
|
|
|
|
|
|
|
if (OldPtr)
|
|
|
|
RemoveFunctionFromSymbolTable(OldPtr);
|
2006-05-11 23:08:08 +00:00
|
|
|
|
|
|
|
// Free the actual memory for the function body and related stuff.
|
|
|
|
assert(dynamic_cast<JITEmitter*>(MCE) && "Unexpected MCE?");
|
2007-02-24 02:57:03 +00:00
|
|
|
static_cast<JITEmitter*>(MCE)->deallocateMemForFunction(F);
|
2006-05-11 23:08:08 +00:00
|
|
|
}
|
|
|
|
|