// Mirror of https://github.com/autc04/Retro68.git, synced 2024-12-27 23:31:44 +00:00.
// Copyright 2013 The Go Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style
|
|
// license that can be found in the LICENSE file.
|
|
|
|
// +build darwin dragonfly freebsd linux netbsd openbsd windows
|
|
|
|
package net
|
|
|
|
#include "runtime.h"
|
|
#include "defs.h"
|
|
#include "arch.h"
|
|
#include "malloc.h"
|
|
|
|
// Map gccgo field names to gc field names.
|
|
// Eface aka __go_empty_interface.
|
|
#define type __type_descriptor
|
|
#define data __object
|
|
|
|
// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue) must define the following functions:
// void runtime_netpollinit(void);			// to initialize the poller
// int32 runtime_netpollopen(uintptr fd, PollDesc *pd);	// to arm edge-triggered notifications
//							// and associate fd with pd.
// An implementation must call the following function to denote that the pd is ready.
// void runtime_netpollready(G **gpp, PollDesc *pd, int32 mode);
|
|
|
#define READY ((G*)1)
|
|
|
|
// PollDesc is the runtime-side state for one polled file descriptor.
// Instances are type-stable (allocated from non-GC memory and recycled
// through pollcache) because epoll/kqueue may deliver a notification for
// a descriptor after it has been closed/reused; staleness is detected
// with the seq field.
struct PollDesc
{
	PollDesc* link;	// in pollcache, protected by pollcache.Lock
	Lock;		// protects the following fields
	uintptr fd;	// the underlying OS file descriptor
	bool closing;	// set by runtime_pollUnblock; later operations fail
	uintptr seq;	// protects from stale timers and ready notifications
	G* rg;		// G waiting for read or READY (binary semaphore)
	Timer rt;	// read deadline timer (set if rt.fv != nil)
	int64 rd;	// read deadline (0 = none, negative = expired)
	G* wg;		// the same for writes
	Timer wt;	// write deadline timer
	int64 wd;	// write deadline
};
|
|
|
|
// pollcache is the global free list of PollDesc objects,
// protected by its embedded Lock.
static struct
{
	Lock;
	PollDesc* first;	// head of the free list
	// PollDesc objects must be type-stable,
	// because we can get ready notification from epoll/kqueue
	// after the descriptor is closed/reused.
	// Stale notifications are detected using seq variable,
	// seq is incremented when deadlines are changed or descriptor is reused.
} pollcache;
|
|
|
|
// Forward declarations for the platform-independent poller helpers below.
static bool netpollblock(PollDesc*, int32);		// park current G until pd is ready in mode
static G* netpollunblock(PollDesc*, int32, bool);	// wake the G (if any) blocked on pd in mode
static void deadline(int64, Eface);			// combined read+write deadline timer callback
static void readDeadline(int64, Eface);			// read deadline timer callback
static void writeDeadline(int64, Eface);		// write deadline timer callback
static PollDesc* allocPollDesc(void);			// pop a PollDesc from pollcache
static intgo checkerr(PollDesc *pd, int32 mode);	// 0 = ok, 1 = errClosing, 2 = errTimeout

// Closures (FuncVal) wrapping the timer callbacks for runtime_addtimer.
static FuncVal deadlineFn = {(void(*)(void))deadline};
static FuncVal readDeadlineFn = {(void(*)(void))readDeadline};
static FuncVal writeDeadlineFn = {(void(*)(void))writeDeadline};
|
|
|
|
// Called once from the net package to initialize the platform-specific
// poller (epoll/kqueue).
func runtime_pollServerInit() {
	runtime_netpollinit();
}
|
|
|
|
// Prepares fd for polling: takes a PollDesc off the free list, resets its
// state, and registers fd with the platform poller. Returns the descriptor
// and the errno from registration (0 on success).
func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
	pd = allocPollDesc();
	runtime_lock(pd);
	// A recycled PollDesc must not still have a parked G; that would
	// mean it was returned to the cache while a goroutine was waiting.
	if(pd->wg != nil && pd->wg != READY)
		runtime_throw("runtime_pollOpen: blocked write on free descriptor");
	if(pd->rg != nil && pd->rg != READY)
		runtime_throw("runtime_pollOpen: blocked read on free descriptor");
	pd->fd = fd;
	pd->closing = false;
	pd->seq++;	// invalidate stale timers/notifications from a previous user
	pd->rg = nil;
	pd->rd = 0;
	pd->wg = nil;
	pd->wd = 0;
	runtime_unlock(pd);

	errno = runtime_netpollopen(fd, pd);
}
|
|
|
|
// Returns pd to the free list. The caller must have already called
// runtime_pollUnblock (closing set, no G still parked on pd).
func runtime_pollClose(pd *PollDesc) {
	if(!pd->closing)
		runtime_throw("runtime_pollClose: close w/o unblock");
	if(pd->wg != nil && pd->wg != READY)
		runtime_throw("runtime_pollClose: blocked write on closing descriptor");
	if(pd->rg != nil && pd->rg != READY)
		runtime_throw("runtime_pollClose: blocked read on closing descriptor");
	runtime_netpollclose(pd->fd);
	// Push pd back onto the pollcache free list.
	runtime_lock(&pollcache);
	pd->link = pollcache.first;
	pollcache.first = pd;
	runtime_unlock(&pollcache);
}
|
|
|
|
// Prepares pd for a new read ('r') or write ('w') operation by clearing
// any stale readiness notification for that mode. Returns 0 on success,
// or the pending error (1 = errClosing, 2 = errTimeout).
func runtime_pollReset(pd *PollDesc, mode int) (err int) {
	runtime_lock(pd);
	err = checkerr(pd, mode);
	if(err == 0) {
		// Drop a leftover READY token so the next wait starts fresh.
		if(mode == 'r')
			pd->rg = nil;
		else if(mode == 'w')
			pd->wg = nil;
	}
	runtime_unlock(pd);
}
|
|
|
|
// Blocks until fd is ready for the given mode ('r' or 'w'), the deadline
// expires, or the descriptor is closed. Returns 0 on ready, 1 (errClosing)
// or 2 (errTimeout) otherwise.
func runtime_pollWait(pd *PollDesc, mode int) (err int) {
	runtime_lock(pd);
	err = checkerr(pd, mode);
	if(err == 0) {
		// Re-check for close/timeout after every spurious wakeup.
		while(!netpollblock(pd, mode)) {
			err = checkerr(pd, mode);
			if(err != 0)
				break;
			// Can happen if timeout has fired and unblocked us,
			// but before we had a chance to run, timeout has been reset.
			// Pretend it has not happened and retry.
		}
	}
	runtime_unlock(pd);
}
|
|
|
|
// Like runtime_pollWait, but waits only for an actual IO-ready
// notification, ignoring close and deadline conditions.
func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
	runtime_lock(pd);
	// wait for ioready, ignore closing or timeouts.
	while(!netpollblock(pd, mode))
		;
	runtime_unlock(pd);
}
|
|
|
|
// Sets the read and/or write deadline for pd. d is an absolute nanotime
// (0 clears the deadline); mode is 'r', 'w', or 'r'+'w' for both. If the
// new deadline is already in the past, any G currently blocked on pd is
// woken immediately with a timeout error.
func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
	G *rg, *wg;

	runtime_lock(pd);
	if(pd->closing) {
		runtime_unlock(pd);
		return;
	}
	pd->seq++;  // invalidate current timers
	// Reset current timers.
	if(pd->rt.fv) {
		runtime_deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime_deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	// Setup new timers.
	if(d != 0 && d <= runtime_nanotime())
		d = -1;	// deadline already expired
	if(mode == 'r' || mode == 'r'+'w')
		pd->rd = d;
	if(mode == 'w' || mode == 'r'+'w')
		pd->wd = d;
	if(pd->rd > 0 && pd->rd == pd->wd) {
		// Read and write deadlines coincide: arm a single combined
		// timer (rt) instead of two.
		pd->rt.fv = &deadlineFn;
		pd->rt.when = pd->rd;
		// Copy current seq into the timer arg.
		// Timer func will check the seq against current descriptor seq,
		// if they differ the descriptor was reused or timers were reset.
		pd->rt.arg.type = (Type*)pd->seq;
		pd->rt.arg.data = pd;
		runtime_addtimer(&pd->rt);
	} else {
		if(pd->rd > 0) {
			pd->rt.fv = &readDeadlineFn;
			pd->rt.when = pd->rd;
			pd->rt.arg.type = (Type*)pd->seq;
			pd->rt.arg.data = pd;
			runtime_addtimer(&pd->rt);
		}
		if(pd->wd > 0) {
			pd->wt.fv = &writeDeadlineFn;
			pd->wt.when = pd->wd;
			pd->wt.arg.type = (Type*)pd->seq;
			pd->wt.arg.data = pd;
			runtime_addtimer(&pd->wt);
		}
	}
	// If we set the new deadline in the past, unblock currently pending IO if any.
	rg = nil;
	wg = nil;
	if(pd->rd < 0)
		rg = netpollunblock(pd, 'r', false);
	if(pd->wd < 0)
		wg = netpollunblock(pd, 'w', false);
	runtime_unlock(pd);
	// Ready the unblocked goroutines outside pd's lock.
	if(rg)
		runtime_ready(rg);
	if(wg)
		runtime_ready(wg);
}
|
|
|
|
// Marks pd as closing: wakes any blocked reader/writer (they will observe
// errClosing), stops the deadline timers, and makes all future operations
// on pd fail. Must be called exactly once, before runtime_pollClose.
func runtime_pollUnblock(pd *PollDesc) {
	G *rg, *wg;

	runtime_lock(pd);
	if(pd->closing)
		runtime_throw("runtime_pollUnblock: already closing");
	pd->closing = true;
	pd->seq++;	// invalidate pending timers and ready notifications
	rg = netpollunblock(pd, 'r', false);
	wg = netpollunblock(pd, 'w', false);
	if(pd->rt.fv) {
		runtime_deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime_deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	runtime_unlock(pd);
	// Ready the unblocked goroutines outside pd's lock.
	if(rg)
		runtime_ready(rg);
	if(wg)
		runtime_ready(wg);
}
|
|
|
|
// Accessor for the OS file descriptor associated with pd.
uintptr
runtime_netpollfd(PollDesc *pd)
{
	return pd->fd;
}
|
|
|
|
// make pd ready; newly runnable goroutines (if any) are enqueued into the
// gpp list. Called by the platform poller implementation (epoll/kqueue)
// when fd becomes ready for mode 'r', 'w', or 'r'+'w'.
void
runtime_netpollready(G **gpp, PollDesc *pd, int32 mode)
{
	G *rg, *wg;

	rg = wg = nil;
	runtime_lock(pd);
	if(mode == 'r' || mode == 'r'+'w')
		rg = netpollunblock(pd, 'r', true);
	if(mode == 'w' || mode == 'r'+'w')
		wg = netpollunblock(pd, 'w', true);
	runtime_unlock(pd);
	// Prepend the woken goroutines to the caller's list via schedlink.
	if(rg) {
		rg->schedlink = *gpp;
		*gpp = rg;
	}
	if(wg) {
		wg->schedlink = *gpp;
		*gpp = wg;
	}
}
|
|
|
|
static intgo
|
|
checkerr(PollDesc *pd, int32 mode)
|
|
{
|
|
if(pd->closing)
|
|
return 1; // errClosing
|
|
if((mode == 'r' && pd->rd < 0) || (mode == 'w' && pd->wd < 0))
|
|
return 2; // errTimeout
|
|
return 0;
|
|
}
|
|
|
|
// returns true if IO is ready, or false if timedout or closed.
// Parks the current G on pd for the given mode ('r' or 'w'), first
// consuming a pending READY token if one is present. Called and returns
// with pd locked; the lock is released while the G is parked.
static bool
netpollblock(PollDesc *pd, int32 mode)
{
	G **gpp;

	gpp = &pd->rg;
	if(mode == 'w')
		gpp = &pd->wg;
	// Consume a pending ready notification, if any.
	if(*gpp == READY) {
		*gpp = nil;
		return true;
	}
	if(*gpp != nil)
		runtime_throw("netpollblock: double wait");
	// Publish ourselves as the waiter, then park; runtime_park
	// releases pd's lock once the G is safely enqueued.
	*gpp = runtime_g();
	runtime_park(runtime_unlock, &pd->Lock, "IO wait");
	runtime_lock(pd);
	// netpollunblock stored the wakeup reason in g->param:
	// non-nil means IO ready, nil means timeout/close.
	if(runtime_g()->param)
		return true;
	return false;
}
|
|
|
|
// Wakes the G (if any) blocked on pd in the given mode and returns it;
// the caller must runtime_ready it after releasing pd's lock. ioready
// distinguishes a real IO notification from a timeout/close wakeup; only
// a real notification leaves a READY token when no G is waiting.
// Called with pd locked.
static G*
netpollunblock(PollDesc *pd, int32 mode, bool ioready)
{
	G **gpp, *old;

	gpp = &pd->rg;
	if(mode == 'w')
		gpp = &pd->wg;
	if(*gpp == READY)
		return nil;
	if(*gpp == nil) {
		// Only set READY for ioready. runtime_pollWait
		// will check for timeout/cancel before waiting.
		if(ioready)
			*gpp = READY;
		return nil;
	}
	old = *gpp;
	// pass unblock reason onto blocked g
	old->param = (void*)(uintptr)ioready;
	*gpp = nil;
	return old;
}
|
|
|
|
static void
|
|
deadlineimpl(int64 now, Eface arg, bool read, bool write)
|
|
{
|
|
PollDesc *pd;
|
|
uint32 seq;
|
|
G *rg, *wg;
|
|
|
|
USED(now);
|
|
pd = (PollDesc*)arg.data;
|
|
// This is the seq when the timer was set.
|
|
// If it's stale, ignore the timer event.
|
|
seq = (uintptr)arg.type;
|
|
rg = wg = nil;
|
|
runtime_lock(pd);
|
|
if(seq != pd->seq) {
|
|
// The descriptor was reused or timers were reset.
|
|
runtime_unlock(pd);
|
|
return;
|
|
}
|
|
if(read) {
|
|
if(pd->rd <= 0 || pd->rt.fv == nil)
|
|
runtime_throw("deadlineimpl: inconsistent read deadline");
|
|
pd->rd = -1;
|
|
pd->rt.fv = nil;
|
|
rg = netpollunblock(pd, 'r', false);
|
|
}
|
|
if(write) {
|
|
if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
|
|
runtime_throw("deadlineimpl: inconsistent write deadline");
|
|
pd->wd = -1;
|
|
pd->wt.fv = nil;
|
|
wg = netpollunblock(pd, 'w', false);
|
|
}
|
|
runtime_unlock(pd);
|
|
if(rg)
|
|
runtime_ready(rg);
|
|
if(wg)
|
|
runtime_ready(wg);
|
|
}
|
|
|
|
// Timer callback for a combined read+write deadline.
static void
deadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, true, true);
}
|
|
|
|
// Timer callback for a read-only deadline.
static void
readDeadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, true, false);
}
|
|
|
|
// Timer callback for a write-only deadline.
static void
writeDeadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, false, true);
}
|
|
|
|
// Pops a PollDesc from the pollcache free list, refilling the list with
// roughly a page worth of new descriptors when it is empty.
static PollDesc*
allocPollDesc(void)
{
	PollDesc *pd;
	uint32 i, n;

	runtime_lock(&pollcache);
	if(pollcache.first == nil) {
		// Allocate descriptors in page-sized batches (at least one).
		n = PageSize/sizeof(*pd);
		if(n == 0)
			n = 1;
		// Must be in non-GC memory because can be referenced
		// only from epoll/kqueue internals.
		pd = runtime_persistentalloc(n*sizeof(*pd), 0, &mstats.other_sys);
		// Thread the fresh batch onto the free list.
		for(i = 0; i < n; i++) {
			pd[i].link = pollcache.first;
			pollcache.first = &pd[i];
		}
	}
	pd = pollcache.first;
	pollcache.first = pd->link;
	runtime_unlock(&pollcache);
	return pd;
}
|