/* mem.c -- low-level OS memory allocation for the libgo runtime.  */
/* Defining _XOPEN_SOURCE hides the declaration of madvise() on Solaris <
11 and the MADV_DONTNEED definition on IRIX 6.5. */
#undef _XOPEN_SOURCE
#include <errno.h>
#include <unistd.h>
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
#ifndef MAP_ANON
#ifdef MAP_ANONYMOUS
#define MAP_ANON MAP_ANONYMOUS
#else
#define USE_DEV_ZERO
#define MAP_ANON 0
#endif
#endif
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifdef USE_DEV_ZERO
static int dev_zero = -1;
#endif
2015-08-28 15:33:40 +00:00
static int32
2012-03-27 23:13:14 +00:00
addrspace_free(void *v __attribute__ ((unused)), uintptr n __attribute__ ((unused)))
{
#ifdef HAVE_MINCORE
size_t page_size = getpagesize();
2015-08-28 15:33:40 +00:00
int32 errval;
uintptr chunk;
uintptr off;
// NOTE: vec must be just 1 byte long here.
// Mincore returns ENOMEM if any of the pages are unmapped,
// but we want to know that all of the pages are unmapped.
// To make these the same, we can only ask about one page
// at a time. See golang.org/issue/7476.
static byte vec[1];
2012-03-27 23:13:14 +00:00
errno = 0;
2015-08-28 15:33:40 +00:00
for(off = 0; off < n; off += chunk) {
chunk = page_size * sizeof vec;
if(chunk > (n - off))
chunk = n - off;
errval = mincore((char*)v + off, chunk, (void*)vec);
// ENOMEM means unmapped, which is what we want.
// Anything else we assume means the pages are mapped.
if(errval == 0 || errno != ENOMEM)
2012-03-27 23:13:14 +00:00
return 0;
2015-08-28 15:33:40 +00:00
}
2012-03-27 23:13:14 +00:00
#endif
return 1;
}
/* Try to map n bytes at the hinted address v.  Plain mmap() may ignore
   the hint on some systems, so when the kernel placed the mapping
   elsewhere even though the requested range still looks unused, drop
   the stray mapping and retry with MAP_FIXED.  */
static void *
mmap_fixed(byte *v, uintptr n, int32 prot, int32 flags, int32 fd, uint32 offset)
{
	void *res;

	res = runtime_mmap((void *)v, n, prot, flags, fd, offset);
	if(res == v || !addrspace_free(v, n))
		return res;
	// Landed somewhere other than v although the hinted range appears
	// free: discard the misplaced mapping (if any) and insist on v.
	if(res != MAP_FAILED)
		runtime_munmap(res, n);
	return runtime_mmap((void *)v, n, prot, flags|MAP_FIXED, fd, offset);
}
void*
2014-09-21 17:33:12 +00:00
runtime_SysAlloc(uintptr n, uint64 *stat)
2012-03-27 23:13:14 +00:00
{
void *p;
int fd = -1;
#ifdef USE_DEV_ZERO
if (dev_zero == -1) {
dev_zero = open("/dev/zero", O_RDONLY);
if (dev_zero < 0) {
runtime_printf("open /dev/zero: errno=%d\n", errno);
exit(2);
}
}
fd = dev_zero;
#endif
2014-09-21 17:33:12 +00:00
p = runtime_mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, fd, 0);
2012-03-27 23:13:14 +00:00
if (p == MAP_FAILED) {
if(errno == EACCES) {
runtime_printf("runtime: mmap: access denied\n");
runtime_printf("if you're running SELinux, enable execmem for this process.\n");
exit(2);
}
2014-09-21 17:33:12 +00:00
if(errno == EAGAIN) {
runtime_printf("runtime: mmap: too much locked memory (check 'ulimit -l').\n");
runtime_exit(2);
}
2012-03-27 23:13:14 +00:00
return nil;
}
2014-09-21 17:33:12 +00:00
runtime_xadd64(stat, n);
2012-03-27 23:13:14 +00:00
return p;
}
/* Hint to the kernel that the pages in [v, v+n) are not needed for
   now; they stay mapped but may be reclaimed.  No-op on systems
   without MADV_DONTNEED.  */
void
runtime_SysUnused(void *v __attribute__ ((unused)), uintptr n __attribute__ ((unused)))
{
#ifndef MADV_DONTNEED
	// No way to tell the OS these pages are idle; do nothing.
#else
	runtime_madvise(v, n, MADV_DONTNEED);
#endif
}
void
2014-09-21 17:33:12 +00:00
runtime_SysUsed(void *v, uintptr n)
2012-03-27 23:13:14 +00:00
{
2014-09-21 17:33:12 +00:00
USED(v);
USED(n);
}
/* Return the n-byte region at v to the OS and subtract it from the
   *stat accounting counter.  */
void
runtime_SysFree(void *v, uintptr n, uint64 *stat)
{
	runtime_munmap(v, n);
	runtime_xadd64(stat, -(uint64)n);
}
2015-08-28 15:33:40 +00:00
void
runtime_SysFault(void *v, uintptr n)
{
int fd = -1;
#ifdef USE_DEV_ZERO
if (dev_zero == -1) {
dev_zero = open("/dev/zero", O_RDONLY);
if (dev_zero < 0) {
runtime_printf("open /dev/zero: errno=%d\n", errno);
exit(2);
}
}
fd = dev_zero;
#endif
runtime_mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, fd, 0);
}
2012-03-27 23:13:14 +00:00
void*
2015-08-28 15:33:40 +00:00
runtime_SysReserve(void *v, uintptr n, bool *reserved)
2012-03-27 23:13:14 +00:00
{
int fd = -1;
void *p;
#ifdef USE_DEV_ZERO
if (dev_zero == -1) {
dev_zero = open("/dev/zero", O_RDONLY);
if (dev_zero < 0) {
runtime_printf("open /dev/zero: errno=%d\n", errno);
exit(2);
}
}
fd = dev_zero;
#endif
// On 64-bit, people with ulimit -v set complain if we reserve too
// much address space. Instead, assume that the reservation is okay
// if we can reserve at least 64K and check the assumption in SysMap.
// Only user-mode Linux (UML) rejects these requests.
2015-08-28 15:33:40 +00:00
if(sizeof(void*) == 8 && (n >> 16) > 1LLU<<16) {
2012-03-27 23:13:14 +00:00
p = mmap_fixed(v, 64<<10, PROT_NONE, MAP_ANON|MAP_PRIVATE, fd, 0);
2014-09-21 17:33:12 +00:00
if (p != v) {
runtime_munmap(p, 64<<10);
2012-03-27 23:13:14 +00:00
return nil;
2014-09-21 17:33:12 +00:00
}
2012-03-27 23:13:14 +00:00
runtime_munmap(p, 64<<10);
2015-08-28 15:33:40 +00:00
*reserved = false;
2012-03-27 23:13:14 +00:00
return v;
}
2014-09-21 17:33:12 +00:00
// Use the MAP_NORESERVE mmap() flag here because typically most of
// this reservation will never be used. It does not make sense
// reserve a huge amount of unneeded swap space. This is important on
// systems which do not overcommit memory by default.
p = runtime_mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_NORESERVE, fd, 0);
2012-03-27 23:13:14 +00:00
if(p == MAP_FAILED)
return nil;
2015-08-28 15:33:40 +00:00
*reserved = true;
2012-03-27 23:13:14 +00:00
return p;
}
void
2015-08-28 15:33:40 +00:00
runtime_SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
2012-03-27 23:13:14 +00:00
{
void *p;
int fd = -1;
2014-09-21 17:33:12 +00:00
runtime_xadd64(stat, n);
2012-03-27 23:13:14 +00:00
#ifdef USE_DEV_ZERO
if (dev_zero == -1) {
dev_zero = open("/dev/zero", O_RDONLY);
if (dev_zero < 0) {
runtime_printf("open /dev/zero: errno=%d\n", errno);
exit(2);
}
}
fd = dev_zero;
#endif
// On 64-bit, we don't actually have v reserved, so tread carefully.
2015-08-28 15:33:40 +00:00
if(!reserved) {
2014-09-21 17:33:12 +00:00
p = mmap_fixed(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, fd, 0);
2012-03-27 23:13:14 +00:00
if(p == MAP_FAILED && errno == ENOMEM)
runtime_throw("runtime: out of memory");
if(p != v) {
runtime_printf("runtime: address space conflict: map(%p) = %p\n", v, p);
runtime_throw("runtime: address space conflict");
}
return;
}
2014-09-21 17:33:12 +00:00
p = runtime_mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, fd, 0);
if(p == MAP_FAILED && errno == ENOMEM)
runtime_throw("runtime: out of memory");
2012-03-27 23:13:14 +00:00
if(p != v)
runtime_throw("runtime: cannot map pages in arena address space");
}