// Retro68/gcc/libgo/runtime/lock_sema.c
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin nacl netbsd openbsd plan9 solaris windows
#include "runtime.h"
// This implementation depends on OS-specific implementations of
//
// uintptr runtime_semacreate(void)
// Create a semaphore, which will be assigned to m->waitsema.
// The zero value is treated as absence of any semaphore,
// so be sure to return a non-zero value.
//
// int32 runtime_semasleep(int64 ns)
// If ns < 0, acquire m->waitsema and return 0.
// If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
// Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
// int32 runtime_semawakeup(M *mp)
// Wake up mp, which is or will soon be sleeping on mp->waitsema.
//
// Constants controlling Lock/Note state and spinning behavior.
enum
{
	LOCKED = 1,		// low bit of Lock.key / Note.key: lock held / note signaled

	ACTIVE_SPIN = 4,	// busy-spin attempts before yielding (multiprocessors only)
	ACTIVE_SPIN_CNT = 30,	// procyield iterations per active-spin attempt
	PASSIVE_SPIN = 1,	// osyield attempts before queuing on the semaphore
};
// runtime_lock acquires l. The low LOCKED bit of l->key holds the lock
// state; the remaining bits hold the head of a list of waiting M's,
// chained through m->nextwaitm. On multiprocessors the caller spins
// briefly before queuing itself and sleeping on its per-M semaphore.
void
runtime_lock(Lock *l)
{
	M *m;
	uintptr v;
	uint32 i, spin;

	m = runtime_m();
	if(m->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	if(runtime_casp((void**)&l->key, nil, (void*)LOCKED))
		return;

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if((v&LOCKED) == 0) {
unlocked:
			// Lock looks free: try to take it, preserving the
			// waiter list held in the upper bits of v.
			if(runtime_casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i<spin)
			runtime_procyield(ACTIVE_SPIN_CNT);
		else if(i<spin+PASSIVE_SPIN)
			runtime_osyield();
		else {
			// Someone else has it.
			// l->key points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime_casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				v = (uintptr)runtime_atomicloadp((void**)&l->key);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued. Wait.
				runtime_semasleep(-1);
				i = 0;
			}
		}
	}
}
// runtime_unlock releases l. If other M's are queued on the lock,
// the first waiter is dequeued and woken via its semaphore;
// otherwise the key is simply cleared.
void
runtime_unlock(Lock *l)
{
	uintptr v;
	M *mp;

	for(;;) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if(v == LOCKED) {
			// No waiters: just clear the lock word.
			if(runtime_casp((void**)&l->key, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			if(runtime_casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
				// Dequeued an M. Wake it.
				runtime_semawakeup(mp);
				break;
			}
		}
	}

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");
}
// One-time notifications.
// runtime_noteclear resets n so it can be used for another
// notesleep/notewakeup round.
void
runtime_noteclear(Note *n)
{
	n->key = 0;
}
// runtime_notewakeup signals n exactly once: n->key is atomically set
// to LOCKED, and if an M had registered itself on the note it is woken
// via its semaphore. A second wakeup without an intervening
// noteclear is a fatal error.
void
runtime_notewakeup(Note *n)
{
	M *mp;

	do
		mp = runtime_atomicloadp((void**)&n->key);
	while(!runtime_casp((void**)&n->key, mp, (void*)LOCKED));

	// Successfully set n->key to LOCKED.
	// What was it before?
	if(mp == nil) {
		// Nothing was waiting. Done.
	} else if(mp == (M*)LOCKED) {
		// Two notewakeups! Not allowed.
		runtime_throw("notewakeup - double wakeup");
	} else {
		// Must be the waiting m. Wake it up.
		runtime_semawakeup(mp);
	}
}
// runtime_notesleep blocks until n is signaled by runtime_notewakeup.
// It registers the current M in n->key, then sleeps on the M's
// semaphore; if the note was already signaled the registration CAS
// fails and it returns immediately.
void
runtime_notesleep(Note *n)
{
	M *m;

	m = runtime_m();

	/* For gccgo it's OK to sleep in non-g0, and it happens in
	   stoptheworld because we have not implemented preemption.
	if(runtime_g() != m->g0)
		runtime_throw("notesleep not on g0");
	*/

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	if(!runtime_casp((void**)&n->key, nil, m)) {	// must be LOCKED (got wakeup)
		if(n->key != LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}

	// Queued. Sleep.
	m->blocked = true;
	runtime_semasleep(-1);
	m->blocked = false;
}
// notetsleep sleeps on n for at most ns nanoseconds (ns < 0 means no
// timeout). Returns true if the note was signaled, false on timeout.
// Conceptually, deadline and mp are local variables. They are passed
// as arguments so that the space for them does not count against our
// nosplit stack sequence.
static bool
notetsleep(Note *n, int64 ns, int64 deadline, M *mp)
{
	M *m;

	m = runtime_m();

	// Register for wakeup on n->key.
	if(!runtime_casp((void**)&n->key, nil, m)) {	// must be LOCKED (got wakeup already)
		if(n->key != LOCKED)
			runtime_throw("notetsleep - waitm out of sync");
		return true;
	}

	if(ns < 0) {
		// Queued. Sleep.
		m->blocked = true;
		runtime_semasleep(-1);
		m->blocked = false;
		return true;
	}

	deadline = runtime_nanotime() + ns;
	for(;;) {
		// Registered. Sleep.
		m->blocked = true;
		if(runtime_semasleep(ns) >= 0) {
			m->blocked = false;
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true;
		}
		m->blocked = false;

		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - runtime_nanotime();
		if(ns <= 0)
			break;
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime_atomicloadp((void**)&n->key);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime_casp((void**)&n->key, mp, nil))
				return false;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			m->blocked = true;
			if(runtime_semasleep(-1) < 0)
				runtime_throw("runtime: unable to acquire - semaphore out of sync");
			m->blocked = false;
			return true;
		} else
			runtime_throw("runtime: unexpected waitm - semaphore out of sync");
	}
}
// Sleep on note n for at most ns nanoseconds; must run on g0
// (or while the M is in GC). Returns true if the note was signaled
// before the timeout.
bool
runtime_notetsleep(Note *n, int64 ns)
{
	M *mp;

	mp = runtime_m();
	if(runtime_g() != mp->g0 && !mp->gcing)
		runtime_throw("notetsleep not on g0");
	if(mp->waitsema == 0)
		mp->waitsema = runtime_semacreate();
	return notetsleep(n, ns, 0, nil);
}
// Same as runtime_notetsleep, but called on a user g (not g0).
// Calls only nosplit functions between entersyscallblock/exitsyscall.
bool
runtime_notetsleepg(Note *n, int64 ns)
{
	M *mp;
	bool signaled;

	mp = runtime_m();
	if(runtime_g() == mp->g0)
		runtime_throw("notetsleepg on g0");
	if(mp->waitsema == 0)
		mp->waitsema = runtime_semacreate();

	runtime_entersyscallblock();
	signaled = notetsleep(n, ns, 0, nil);
	runtime_exitsyscall();
	return signaled;
}