This patch adds memory support functions which will later be used to implement section-specific protection handling in MCJIT.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164249 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Andrew Kaylor
2012-09-19 20:46:12 +00:00
parent 7b6f2034ac
commit bbf628b6ce
6 changed files with 747 additions and 90 deletions

View File

@@ -15,6 +15,7 @@
#define LLVM_SYSTEM_MEMORY_H
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/system_error.h"
#include <string>
namespace llvm {
@@ -43,6 +44,70 @@ namespace sys {
/// @brief An abstraction for memory operations.
class Memory {
public:
enum ProtectionFlags {
MF_READ = 0x1000000,
MF_WRITE = 0x2000000,
MF_EXEC = 0x4000000
};
/// This method allocates a block of memory that is suitable for loading
/// dynamically generated code (e.g. JIT). An attempt to allocate
/// \p NumBytes bytes of virtual memory is made.
/// \p NearBlock may point to an existing allocation in which case
/// an attempt is made to allocate more memory near the existing block.
/// The actual allocated address is not guaranteed to be near the requested
/// address.
/// \p Flags is used to set the initial protection flags for the block
/// of the memory.
/// \p EC [out] returns an object describing any error that occurs.
///
/// This method may allocate more than the number of bytes requested. The
/// actual number of bytes allocated is indicated in the returned
/// MemoryBlock.
///
/// The start of the allocated block must be aligned with the
/// system allocation granularity (64K on Windows, page size on Linux).
/// If the address following \p NearBlock is not so aligned, it will be
/// rounded up to the next allocation granularity boundary.
///
/// \returns a non-null MemoryBlock if the function was successful,
/// otherwise a null MemoryBlock with \p EC describing the error.
///
/// @brief Allocate mapped memory.
static MemoryBlock allocateMappedMemory(size_t NumBytes,
const MemoryBlock *const NearBlock,
unsigned Flags,
error_code &EC);
/// This method releases a block of memory that was allocated with the
/// allocateMappedMemory method. It should not be used to release any
/// memory block allocated any other way.
/// \p Block describes the memory to be released.
///
/// \r error_success if the function was successful, or an error_code
/// describing the failure if an error occurred.
///
/// @brief Release mapped memory.
static error_code releaseMappedMemory(MemoryBlock &Block);
/// This method sets the protection flags for a block of memory to the
/// state specified by \p Flags. The behavior is not specified if the
/// memory was not allocated using the allocateMappedMemory method.
/// \p Block describes the memory block to be protected.
/// \p Flags specifies the new protection state to be assigned to the block.
/// \p ErrMsg [out] returns a string describing any error that occurred.
///
/// If \p Flags is MF_WRITE, the actual behavior varies
/// with the operating system (i.e. MF_READWRITE on Windows) and the
/// target architecture (i.e. MF_WRITE -> MF_READWRITE on i386).
///
/// \r error_success if the function was successful, or an error_code
/// describing the failure if an error occurred.
///
/// @brief Set memory protection state.
static error_code protectMappedMemory(const MemoryBlock &Block,
unsigned Flags);
/// This method allocates a block of Read/Write/Execute memory that is
/// suitable for executing dynamically generated code (e.g. JIT). An
/// attempt to allocate \p NumBytes bytes of virtual memory is made.

View File

@@ -16,10 +16,6 @@
#include "llvm/Support/Valgrind.h"
#include "llvm/Config/config.h"
namespace llvm {
using namespace sys;
}
// Include the platform-specific parts of this class.
#ifdef LLVM_ON_UNIX
#include "Unix/Memory.inc"
@@ -27,51 +23,3 @@ using namespace sys;
#ifdef LLVM_ON_WIN32
#include "Windows/Memory.inc"
#endif
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr,
size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

// On Darwin a single libkern entry point handles both PPC and ARM.
# if (defined(__POWERPC__) || defined (__ppc__) || \
defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
sys_icache_invalidate(const_cast<void *>(Addr), Len);
# endif

#else

# if (defined(__POWERPC__) || defined (__ppc__) || \
defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
// Assumes a 32-byte cache line; walk every line covering [Addr, Addr+Len).
const size_t LineSize = 32;
const intptr_t Mask = ~(LineSize - 1);
const intptr_t StartLine = ((intptr_t) Addr) & Mask;
const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;
// Flush each data-cache block to memory, then invalidate the matching
// instruction-cache blocks, with the required sync/isync barriers.
for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
asm volatile("dcbf 0, %0" : : "r"(Line));
asm volatile("sync");
for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
asm volatile("icbi 0, %0" : : "r"(Line));
asm volatile("isync");
# elif defined(__arm__) && defined(__GNUC__)
// FIXME: Can we safely always call this for __GNUC__ everywhere?
const char *Start = static_cast<const char *>(Addr);
const char *End = Start + Len;
__clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
# elif defined(__mips__)
const char *Start = static_cast<const char *>(Addr);
cacheflush(const_cast<char *>(Start), Len, BCACHE);
# endif

#endif // end apple

// Tell Valgrind the code in this range has changed.
ValgrindDiscardTranslations(Addr, Len);
}

View File

@@ -13,6 +13,7 @@
#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
#ifdef HAVE_SYS_MMAN_H
@@ -31,14 +32,138 @@
# endif
#endif
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
namespace {
/// Translate the generic llvm::sys::Memory::MF_* protection bits into the
/// POSIX PROT_* bits expected by mmap/mprotect.
///
/// \param Flags a combination of llvm::sys::Memory::ProtectionFlags.
/// \returns the equivalent PROT_* mask.
///
/// Only the combinations listed below are legal; any other value aborts via
/// llvm_unreachable in asserts builds.
int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ |
	 llvm::sys::Memory::MF_WRITE |
	 llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_WRITE|llvm::sys::Memory::MF_EXEC:
    // Previously missing: write+exec (without read) is a legal POSIX request
    // and keeps this mapping consistent with the ProtectionFlags enum.
    return PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
    return PROT_EXEC;
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}
} // namespace
namespace llvm {
namespace sys {
// Allocate NumBytes of page-granular memory with the protection given by
// PFlags. On failure a default-constructed (null) MemoryBlock is returned
// and EC carries the errno-based error. A NearBlock hint, if given, is
// advisory only.
MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             error_code &EC) {
  EC = error_code::success();
  if (NumBytes == 0)
    return MemoryBlock();

  // Round the request up to a whole number of pages.
  static const size_t PageSize = Process::GetPageSize();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  // Platforms without anonymous mmap support map /dev/zero instead.
  // NOTE(review): zero_fd is opened once and intentionally never closed;
  // it lives for the lifetime of the process.
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    EC = error_code(errno, system_category());
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int MMFlags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting address
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) //Try again without a near hint
      return allocateMappedMemory(NumBytes, 0, PFlags, EC);

    EC = error_code(errno, system_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  // Executable memory must have the instruction cache flushed before use.
  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}
// Unmap a block previously returned by allocateMappedMemory and clear its
// fields so a double release becomes a harmless no-op.
error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  // A null or empty block counts as already released.
  if (M.Address != 0 && M.Size != 0) {
    if (::munmap(M.Address, M.Size) != 0)
      return error_code(errno, system_category());
    M.Address = 0;
    M.Size = 0;
  }
  return error_code::success();
}
// Change the protection of an allocateMappedMemory block to Flags via
// mprotect, flushing the icache when the block becomes executable.
error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  // Protecting a null/empty block is a no-op.
  if (M.Address == 0 || M.Size == 0)
    return error_code::success();

  // Requesting no permissions at all is treated as an invalid argument.
  if (Flags == 0)
    return error_code(EINVAL, generic_category());

  const int Protect = getPosixProtectionFlags(Flags);
  if (::mprotect(M.Address, M.Size, Protect) != 0)
    return error_code(errno, system_category());

  // Newly executable memory requires an instruction-cache flush.
  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return error_code::success();
}
/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions. This is typically used for JIT applications where we want
/// to emit code to the memory then jump to it. Getting this type of memory
/// is very OS specific.
///
llvm::sys::MemoryBlock
llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
std::string *ErrMsg) {
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
std::string *ErrMsg) {
if (NumBytes == 0) return MemoryBlock();
size_t pageSize = Process::GetPageSize();
@@ -86,7 +211,7 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
if (KERN_SUCCESS != kr) {
MakeErrMsg(ErrMsg, "vm_protect max RX failed");
return sys::MemoryBlock();
return MemoryBlock();
}
kr = vm_protect(mach_task_self(), (vm_address_t)pa,
@@ -94,7 +219,7 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
VM_PROT_READ | VM_PROT_WRITE);
if (KERN_SUCCESS != kr) {
MakeErrMsg(ErrMsg, "vm_protect RW failed");
return sys::MemoryBlock();
return MemoryBlock();
}
#endif
@@ -105,17 +230,17 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
return result;
}
bool llvm::sys::Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
if (M.Address == 0 || M.Size == 0) return false;
if (0 != ::munmap(M.Address, M.Size))
return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
return false;
}
bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
if (M.Address == 0 || M.Size == 0) return false;
sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
Memory::InvalidateInstructionCache(M.Address, M.Size);
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
(vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
return KERN_SUCCESS == kr;
@@ -124,10 +249,10 @@ bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
#endif
}
bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
if (M.Address == 0 || M.Size == 0) return false;
sys::Memory::InvalidateInstructionCache(M.Address, M.Size);
Memory::InvalidateInstructionCache(M.Address, M.Size);
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
(vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
return KERN_SUCCESS == kr;
@@ -136,7 +261,7 @@ bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
#endif
}
bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) {
bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
(vm_size_t)Size, 0,
@@ -147,7 +272,7 @@ bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) {
#endif
}
bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) {
bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
(vm_size_t)Size, 0,
@@ -157,3 +282,52 @@ bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) {
return true;
#endif
}
/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

// On Darwin a single libkern entry point handles both PPC and ARM.
# if (defined(__POWERPC__) || defined (__ppc__) || \
defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
sys_icache_invalidate(const_cast<void *>(Addr), Len);
# endif

#else

# if (defined(__POWERPC__) || defined (__ppc__) || \
defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
// Assumes a 32-byte cache line; walk every line covering [Addr, Addr+Len).
const size_t LineSize = 32;
const intptr_t Mask = ~(LineSize - 1);
const intptr_t StartLine = ((intptr_t) Addr) & Mask;
const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;
// Flush each data-cache block, then invalidate the matching icache blocks,
// with the required sync/isync barriers in between.
for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
asm volatile("dcbf 0, %0" : : "r"(Line));
asm volatile("sync");
for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
asm volatile("icbi 0, %0" : : "r"(Line));
asm volatile("isync");
# elif defined(__arm__) && defined(__GNUC__)
// FIXME: Can we safely always call this for __GNUC__ everywhere?
const char *Start = static_cast<const char *>(Addr);
const char *End = Start + Len;
__clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
# elif defined(__mips__)
const char *Start = static_cast<const char *>(Addr);
cacheflush(const_cast<char *>(Start), Len, BCACHE);
# endif

#endif // end apple

// Tell Valgrind the code in this range has changed.
ValgrindDiscardTranslations(Addr, Len);
}
} // namespace sys
} // namespace llvm

View File

@@ -12,51 +12,163 @@
//
//===----------------------------------------------------------------------===//
#include "Windows.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
#include "Windows.h"
namespace {
/// Translate the generic llvm::sys::Memory::MF_* protection bits into the
/// PAGE_* constants expected by VirtualAlloc/VirtualProtect.
///
/// \param Flags a combination of llvm::sys::Memory::ProtectionFlags.
/// \returns the equivalent PAGE_* protection constant.
DWORD getWindowsProtectionFlags(unsigned Flags) {
  switch (Flags) {
  // Contrary to what you might expect, the Windows page protection flags
  // are not a bitwise combination of RWX values
  case llvm::sys::Memory::MF_READ:
    return PAGE_READONLY;
  case llvm::sys::Memory::MF_WRITE:
    // Note: PAGE_WRITE is not supported by VirtualProtect
    return PAGE_READWRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PAGE_READWRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PAGE_EXECUTE_READ;
  case llvm::sys::Memory::MF_READ |
	 llvm::sys::Memory::MF_WRITE |
	 llvm::sys::Memory::MF_EXEC:
    return PAGE_EXECUTE_READWRITE;
  case llvm::sys::Memory::MF_WRITE|llvm::sys::Memory::MF_EXEC:
    // Previously missing; Windows has no write+exec-without-read protection,
    // so the closest superset is used (mirrors the POSIX mapping's intent).
    return PAGE_EXECUTE_READWRITE;
  case llvm::sys::Memory::MF_EXEC:
    return PAGE_EXECUTE;
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PAGE_NOACCESS;
}
// Query the OS for the effective allocation granularity: the larger of the
// page size and the VirtualAlloc allocation granularity (64K in practice).
size_t getAllocationGranularity() {
  SYSTEM_INFO Info;
  ::GetSystemInfo(&Info);
  return (Info.dwPageSize > Info.dwAllocationGranularity)
             ? Info.dwPageSize
             : Info.dwAllocationGranularity;
}
} // namespace
namespace llvm {
using namespace sys;
namespace sys {
//===----------------------------------------------------------------------===//
//=== WARNING: Implementation here must contain only Win32 specific code
//=== and must not be UNIX code
//===----------------------------------------------------------------------===//
/// Allocate NumBytes of memory (rounded up to the Windows allocation
/// granularity) with the protection requested in Flags.
///
/// This span interleaved the pre-refactor AllocateRWX body with the new
/// implementation; only the new allocateMappedMemory logic is kept below.
/// Also fixed: a stray empty statement after the Result.Size assignment,
/// and NULL used to initialize a uintptr_t (0 is the correct value).
///
/// \param NumBytes   number of bytes requested (may be rounded up).
/// \param NearBlock  optional placement hint; advisory only.
/// \param Flags      combination of MF_READ/MF_WRITE/MF_EXEC.
/// \param EC [out]   error description on failure.
/// \returns a non-null MemoryBlock on success, a null block otherwise.
MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
                                         const MemoryBlock *const NearBlock,
                                         unsigned Flags,
                                         error_code &EC) {
  EC = error_code::success();
  if (NumBytes == 0)
    return MemoryBlock();

  // While we'd be happy to allocate single pages, the Windows allocation
  // granularity may be larger than a single page (in practice, it is 64K)
  // so mapping less than that will create an unreachable fragment of memory.
  static const size_t Granularity = getAllocationGranularity();
  const size_t NumBlocks = (NumBytes+Granularity-1)/Granularity;

  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                NearBlock->size()
                              : 0;

  // If the requested address is not aligned to the allocation granularity,
  // round up to get beyond NearBlock. VirtualAlloc would have rounded down.
  if (Start && Start % Granularity != 0)
    Start += Granularity - Start % Granularity;

  DWORD Protect = getWindowsProtectionFlags(Flags);

  void *PA = ::VirtualAlloc(reinterpret_cast<void*>(Start),
                            NumBlocks*Granularity,
                            MEM_RESERVE | MEM_COMMIT, Protect);
  if (PA == NULL) {
    if (NearBlock) {
      // Try again without the NearBlock hint
      return allocateMappedMemory(NumBytes, NULL, Flags, EC);
    }
    EC = error_code(::GetLastError(), system_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = PA;
  Result.Size = NumBlocks*Granularity;

  // Executable memory must have the instruction cache flushed before use.
  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}
// Release a block obtained from allocateMappedMemory and zero its fields so
// releasing it twice is a harmless no-op.
error_code Memory::releaseMappedMemory(MemoryBlock &M) {
  // A null or empty block counts as already released.
  if (M.Address != 0 && M.Size != 0) {
    if (!VirtualFree(M.Address, 0, MEM_RELEASE))
      return error_code(::GetLastError(), system_category());
    M.Address = 0;
    M.Size = 0;
  }
  return error_code::success();
}
// Re-protect an allocateMappedMemory block via VirtualProtect, flushing the
// icache when the block becomes executable.
error_code Memory::protectMappedMemory(const MemoryBlock &M,
                                       unsigned Flags) {
  // Protecting a null/empty block is a no-op.
  if (M.Address == 0 || M.Size == 0)
    return error_code::success();

  DWORD NewProtect = getWindowsProtectionFlags(Flags);
  DWORD PreviousProtect;  // required out-param; the old state is discarded
  if (!VirtualProtect(M.Address, M.Size, NewProtect, &PreviousProtect))
    return error_code(::GetLastError(), system_category());

  // Newly executable memory requires an instruction-cache flush.
  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return error_code::success();
}
/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms. On Windows this is a single Win32 call.
void Memory::InvalidateInstructionCache(const void *Addr, size_t Len) {
  FlushInstructionCache(GetCurrentProcess(), Addr, Len);
}
// Legacy entry point: allocate read/write/execute memory by delegating to
// the general allocator, converting the error_code into the old
// bool-plus-message convention.
MemoryBlock Memory::AllocateRWX(size_t NumBytes,
                                const MemoryBlock *NearBlock,
                                std::string *ErrMsg) {
  error_code EC;
  MemoryBlock MB = allocateMappedMemory(NumBytes, NearBlock,
                                        MF_READ|MF_WRITE|MF_EXEC, EC);
  // Only report a message when the caller asked for one.
  if (EC != error_code::success() && ErrMsg)
    MakeErrMsg(ErrMsg, EC.message());
  return MB;
}
/// Legacy release entry point. This span interleaved the pre-refactor body
/// (direct VirtualFree) with the new delegation; only the new logic is kept.
///
/// \returns false on success, true on failure (with \p ErrMsg set) — the
/// inverted-bool convention of the old API.
bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  error_code EC = releaseMappedMemory(M);
  if (EC == error_code::success())
    return false;
  MakeErrMsg(ErrMsg, EC.message());
  return true;
}
static DWORD getProtection(const void *addr) {
@@ -93,7 +205,7 @@ bool Memory::setRangeWritable(const void *Addr, size_t Size) {
}
DWORD oldProt;
sys::Memory::InvalidateInstructionCache(Addr, Size);
Memory::InvalidateInstructionCache(Addr, Size);
return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt)
== TRUE;
}
@@ -112,9 +224,10 @@ bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
}
DWORD oldProt;
sys::Memory::InvalidateInstructionCache(Addr, Size);
Memory::InvalidateInstructionCache(Addr, Size);
return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt)
== TRUE;
}
}
} // namespace sys
} // namespace llvm

View File

@@ -17,6 +17,7 @@ add_llvm_unittest(SupportTests
LeakDetectorTest.cpp
ManagedStatic.cpp
MathExtrasTest.cpp
MemoryTest.cpp
Path.cpp
RegexTest.cpp
SwapByteOrderTest.cpp

View File

@@ -0,0 +1,356 @@
//===- llvm/unittest/Support/MemoryTest.cpp - Mapped-memory tests ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/Memory.h"
#include "llvm/Support/Process.h"
#include "gtest/gtest.h"
#include <cstdlib>
using namespace llvm;
using namespace sys;
namespace {
// Parameterized fixture: each instantiation runs the whole suite under one
// protection-flag combination supplied via GetParam().
class MappedMemoryTest : public ::testing::TestWithParam<unsigned> {
public:
  MappedMemoryTest() {
    Flags = GetParam();
    PageSize = sys::Process::GetPageSize();
  }

protected:
  // Adds RW flags to permit testing of the resulting memory
  unsigned getTestableEquivalent(unsigned RequestedFlags) {
    switch (RequestedFlags) {
    // Any combination involving EXEC keeps EXEC and gains read/write.
    case Memory::MF_EXEC:
    case Memory::MF_READ|Memory::MF_EXEC:
    case Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC:
      return Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC;
    // Plain data combinations all map to read/write.
    case Memory::MF_READ:
    case Memory::MF_WRITE:
    case Memory::MF_READ|Memory::MF_WRITE:
      return Memory::MF_READ|Memory::MF_WRITE;
    }
    // Default in case values are added to the enum, as required by some compilers
    return Memory::MF_READ|Memory::MF_WRITE;
  }

  // Returns true if the memory blocks overlap
  bool doesOverlap(MemoryBlock M1, MemoryBlock M2) {
    unsigned char *Base1 = (unsigned char *)M1.base();
    unsigned char *Base2 = (unsigned char *)M2.base();
    if (Base1 == Base2)
      return true;
    // Check whether the lower block extends past the start of the higher one.
    if (Base1 < Base2)
      return Base1 + M1.size() > Base2;
    return Base2 + M2.size() > Base1;
  }

  unsigned Flags;   // protection flags under test
  size_t PageSize;  // system page size, for constructing hint addresses
};
// Smoke test: a minimal allocation succeeds, reports a usable base/size,
// and releases cleanly.
TEST_P(MappedMemoryTest, AllocAndRelease) {
  error_code EC;
  MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);

  EXPECT_NE((void*)0, M1.base());
  // The allocator may round up to a whole page, so only a lower bound holds.
  EXPECT_LE(sizeof(int), M1.size());

  // releaseMappedMemory returns an error_code; success converts to false.
  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
}
// Live allocations must not overlap, and releasing them out of order
// (M1, M3, then M2) must work, including allocating again mid-release.
TEST_P(MappedMemoryTest, MultipleAllocAndRelease) {
  error_code EC;
  MemoryBlock M1 = Memory::allocateMappedMemory(16, 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M2 = Memory::allocateMappedMemory(64, 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M3 = Memory::allocateMappedMemory(32, 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);

  EXPECT_NE((void*)0, M1.base());
  EXPECT_LE(16U, M1.size());
  EXPECT_NE((void*)0, M2.base());
  EXPECT_LE(64U, M2.size());
  EXPECT_NE((void*)0, M3.base());
  EXPECT_LE(32U, M3.size());

  EXPECT_FALSE(doesOverlap(M1, M2));
  EXPECT_FALSE(doesOverlap(M2, M3));
  EXPECT_FALSE(doesOverlap(M1, M3));

  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
  // Allocate again while M2 is still live to exercise interleaved alloc/free.
  MemoryBlock M4 = Memory::allocateMappedMemory(16, 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  EXPECT_NE((void*)0, M4.base());
  EXPECT_LE(16U, M4.size());
  EXPECT_FALSE(Memory::releaseMappedMemory(M4));
  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
}
// Write through the returned pointer and read the value back; skipped for
// flag combinations that do not include write permission.
TEST_P(MappedMemoryTest, BasicWrite) {
  // This test applies only to writeable combinations
  if (Flags && !(Flags & Memory::MF_WRITE))
    return;

  error_code EC;
  MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);

  EXPECT_NE((void*)0, M1.base());
  EXPECT_LE(sizeof(int), M1.size());

  int *a = (int*)M1.base();
  *a = 1;
  EXPECT_EQ(1, *a);

  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
}
// Writes to several live blocks must be independent: releasing and
// reallocating other blocks must not disturb the contents of M2.
TEST_P(MappedMemoryTest, MultipleWrite) {
  // This test applies only to writeable combinations
  if (Flags && !(Flags & Memory::MF_WRITE))
    return;
  error_code EC;
  MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M2 = Memory::allocateMappedMemory(8 * sizeof(int), 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M3 = Memory::allocateMappedMemory(4 * sizeof(int), 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);

  EXPECT_FALSE(doesOverlap(M1, M2));
  EXPECT_FALSE(doesOverlap(M2, M3));
  EXPECT_FALSE(doesOverlap(M1, M3));

  EXPECT_NE((void*)0, M1.base());
  EXPECT_LE(1U * sizeof(int), M1.size());
  EXPECT_NE((void*)0, M2.base());
  EXPECT_LE(8U * sizeof(int), M2.size());
  EXPECT_NE((void*)0, M3.base());
  EXPECT_LE(4U * sizeof(int), M3.size());

  // Fill each block with a distinct pattern.
  int *x = (int*)M1.base();
  *x = 1;

  int *y = (int*)M2.base();
  for (int i = 0; i < 8; i++) {
    y[i] = i;
  }

  int *z = (int*)M3.base();
  *z = 42;

  EXPECT_EQ(1, *x);
  EXPECT_EQ(7, y[7]);
  EXPECT_EQ(42, *z);

  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
  EXPECT_FALSE(Memory::releaseMappedMemory(M3));

  // Allocate and write a fresh block while M2 is still live.
  MemoryBlock M4 = Memory::allocateMappedMemory(64 * sizeof(int), 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  EXPECT_NE((void*)0, M4.base());
  EXPECT_LE(64U * sizeof(int), M4.size());
  x = (int*)M4.base();
  *x = 4;
  EXPECT_EQ(4, *x);
  EXPECT_FALSE(Memory::releaseMappedMemory(M4));

  // Verify that M2 remains unaffected by other activity
  for (int i = 0; i < 8; i++) {
    EXPECT_EQ(i, y[i]);
  }
  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
}
// Allocate under the parameterized flags, then use protectMappedMemory
// (via getTestableEquivalent) to add the permissions needed to verify the
// block contents, regardless of the original protection.
TEST_P(MappedMemoryTest, EnabledWrite) {
  error_code EC;
  MemoryBlock M1 = Memory::allocateMappedMemory(2 * sizeof(int), 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M2 = Memory::allocateMappedMemory(8 * sizeof(int), 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M3 = Memory::allocateMappedMemory(4 * sizeof(int), 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);

  EXPECT_NE((void*)0, M1.base());
  EXPECT_LE(2U * sizeof(int), M1.size());
  EXPECT_NE((void*)0, M2.base());
  EXPECT_LE(8U * sizeof(int), M2.size());
  EXPECT_NE((void*)0, M3.base());
  EXPECT_LE(4U * sizeof(int), M3.size());

  // Re-protect each block with a readable/writable superset of Flags.
  EXPECT_FALSE(Memory::protectMappedMemory(M1, getTestableEquivalent(Flags)));
  EXPECT_FALSE(Memory::protectMappedMemory(M2, getTestableEquivalent(Flags)));
  EXPECT_FALSE(Memory::protectMappedMemory(M3, getTestableEquivalent(Flags)));

  EXPECT_FALSE(doesOverlap(M1, M2));
  EXPECT_FALSE(doesOverlap(M2, M3));
  EXPECT_FALSE(doesOverlap(M1, M3));

  int *x = (int*)M1.base();
  *x = 1;
  int *y = (int*)M2.base();
  for (unsigned int i = 0; i < 8; i++) {
    y[i] = i;
  }
  int *z = (int*)M3.base();
  *z = 42;

  EXPECT_EQ(1, *x);
  EXPECT_EQ(7, y[7]);
  EXPECT_EQ(42, *z);

  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
  // M2 must still hold its data after M1/M3 are gone.
  EXPECT_EQ(6, y[6]);

  MemoryBlock M4 = Memory::allocateMappedMemory(16, 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  EXPECT_NE((void*)0, M4.base());
  EXPECT_LE(16U, M4.size());
  EXPECT_EQ(error_code::success(), Memory::protectMappedMemory(M4, getTestableEquivalent(Flags)));
  x = (int*)M4.base();
  *x = 4;
  EXPECT_EQ(4, *x);
  EXPECT_FALSE(Memory::releaseMappedMemory(M4));
  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
}
// Each allocation passes the previous block as a placement hint. The hint
// is advisory, so only success and non-overlap are asserted.
TEST_P(MappedMemoryTest, SuccessiveNear) {
  error_code EC;
  MemoryBlock M1 = Memory::allocateMappedMemory(16, 0, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M2 = Memory::allocateMappedMemory(64, &M1, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M3 = Memory::allocateMappedMemory(32, &M2, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);

  EXPECT_NE((void*)0, M1.base());
  EXPECT_LE(16U, M1.size());
  EXPECT_NE((void*)0, M2.base());
  EXPECT_LE(64U, M2.size());
  EXPECT_NE((void*)0, M3.base());
  EXPECT_LE(32U, M3.size());

  EXPECT_FALSE(doesOverlap(M1, M2));
  EXPECT_FALSE(doesOverlap(M2, M3));
  EXPECT_FALSE(doesOverlap(M1, M3));

  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
}
// All three allocations share the same synthetic hint address; each must
// still succeed with a distinct block.
TEST_P(MappedMemoryTest, DuplicateNear) {
  error_code EC;
  MemoryBlock Near((void*)(3*PageSize), 16);
  MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);

  EXPECT_NE((void*)0, M1.base());
  EXPECT_LE(16U, M1.size());
  EXPECT_NE((void*)0, M2.base());
  EXPECT_LE(64U, M2.size());
  EXPECT_NE((void*)0, M3.base());
  EXPECT_LE(32U, M3.size());

  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
}
// A null (zero base, zero size) hint block must behave like no hint at all.
TEST_P(MappedMemoryTest, ZeroNear) {
  error_code EC;
  MemoryBlock Near(0, 0);
  MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);

  EXPECT_NE((void*)0, M1.base());
  EXPECT_LE(16U, M1.size());
  EXPECT_NE((void*)0, M2.base());
  EXPECT_LE(64U, M2.size());
  EXPECT_NE((void*)0, M3.base());
  EXPECT_LE(32U, M3.size());

  EXPECT_FALSE(doesOverlap(M1, M2));
  EXPECT_FALSE(doesOverlap(M2, M3));
  EXPECT_FALSE(doesOverlap(M1, M3));

  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
}
// A hint block with a valid base but zero size must still be usable.
TEST_P(MappedMemoryTest, ZeroSizeNear) {
  error_code EC;
  MemoryBlock Near((void*)(4*PageSize), 0);
  MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);
  MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);

  EXPECT_NE((void*)0, M1.base());
  EXPECT_LE(16U, M1.size());
  EXPECT_NE((void*)0, M2.base());
  EXPECT_LE(64U, M2.size());
  EXPECT_NE((void*)0, M3.base());
  EXPECT_LE(32U, M3.size());

  EXPECT_FALSE(doesOverlap(M1, M2));
  EXPECT_FALSE(doesOverlap(M2, M3));
  EXPECT_FALSE(doesOverlap(M1, M3));

  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
  EXPECT_FALSE(Memory::releaseMappedMemory(M3));
  EXPECT_FALSE(Memory::releaseMappedMemory(M2));
}
// A hint address that is not page-aligned must be handled (the allocator
// rounds it up to the next boundary) and still yield a valid block.
TEST_P(MappedMemoryTest, UnalignedNear) {
  error_code EC;
  MemoryBlock Near((void*)(2*PageSize+5), 0);
  MemoryBlock M1 = Memory::allocateMappedMemory(15, &Near, Flags, EC);
  EXPECT_EQ(error_code::success(), EC);

  EXPECT_NE((void*)0, M1.base());
  EXPECT_LE(sizeof(int), M1.size());

  EXPECT_FALSE(Memory::releaseMappedMemory(M1));
}
// Note that Memory::MF_WRITE is not supported exclusively across
// operating systems and architectures and can imply MF_READ|MF_WRITE
unsigned MemoryFlags[] = {
  Memory::MF_READ,
  Memory::MF_WRITE,
  Memory::MF_READ|Memory::MF_WRITE,
  Memory::MF_EXEC,
  Memory::MF_READ|Memory::MF_EXEC,
  Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC
};

// Run every MappedMemoryTest case once per protection combination above.
INSTANTIATE_TEST_CASE_P(AllocationTests,
                        MappedMemoryTest,
                        ::testing::ValuesIn(MemoryFlags));
} // anonymous namespace