// Retro68/gcc/libgo/go/sync/waitgroup.go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sync

import (
	"internal/race"
	"sync/atomic"
	"unsafe"
)
// A WaitGroup waits for a collection of goroutines to finish.
// The main goroutine calls Add to set the number of
// goroutines to wait for. Then each of the goroutines
// runs and calls Done when finished. At the same time,
// Wait can be used to block until all goroutines have finished.
//
// A WaitGroup must not be copied after first use.
type WaitGroup struct {
	// noCopy makes `go vet`'s copylocks checker flag copies of a
	// WaitGroup after first use; it occupies no space at runtime.
	noCopy noCopy

	// 64-bit value: high 32 bits are counter, low 32 bits are waiter count.
	// 64-bit atomic operations require 64-bit alignment, but 32-bit
	// compilers do not ensure it. So we allocate 12 bytes and then use
	// the aligned 8 bytes in them as state, and the other 4 as storage
	// for the sema. Which 8 of the 12 bytes are aligned is decided at
	// runtime by the state() method below.
	state1 [3]uint32
}
2019-06-02 15:48:37 +00:00
// state returns pointers to the state and sema fields stored within wg.state1.
func (wg *WaitGroup) state() (statep *uint64, semap *uint32) {
2017-04-10 11:32:00 +00:00
if uintptr(unsafe.Pointer(&wg.state1))%8 == 0 {
2019-06-02 15:48:37 +00:00
return (*uint64)(unsafe.Pointer(&wg.state1)), &wg.state1[2]
2017-04-10 11:32:00 +00:00
} else {
2019-06-02 15:48:37 +00:00
return (*uint64)(unsafe.Pointer(&wg.state1[1])), &wg.state1[0]
2017-04-10 11:32:00 +00:00
}
}
// Add adds delta, which may be negative, to the WaitGroup counter.
// If the counter becomes zero, all goroutines blocked on Wait are released.
// If the counter goes negative, Add panics.
//
// Note that calls with a positive delta that occur when the counter is zero
// must happen before a Wait. Calls with a negative delta, or calls with a
// positive delta that start when the counter is greater than zero, may happen
// at any time.
// Typically this means the calls to Add should execute before the statement
// creating the goroutine or other event to be waited for.
// If a WaitGroup is reused to wait for several independent sets of events,
// new Add calls must happen after all previous Wait calls have returned.
// See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
	statep, semap := wg.state()
	if race.Enabled {
		_ = *statep // trigger nil deref early
		if delta < 0 {
			// Synchronize decrements with Wait.
			race.ReleaseMerge(unsafe.Pointer(wg))
		}
		race.Disable()
		defer race.Enable()
	}
	// Bump the counter in the high 32 bits; v is the new counter,
	// w the current waiter count (low 32 bits).
	state := atomic.AddUint64(statep, uint64(delta)<<32)
	v := int32(state >> 32)
	w := uint32(state)
	if race.Enabled && delta > 0 && v == int32(delta) {
		// The first increment must be synchronized with Wait.
		// Need to model this as a read, because there can be
		// several concurrent wg.counter transitions from 0.
		race.Read(unsafe.Pointer(semap))
	}
	if v < 0 {
		panic("sync: negative WaitGroup counter")
	}
	// v == int32(delta) means the counter just transitioned from 0;
	// waiters must not already exist at that moment.
	if w != 0 && delta > 0 && v == int32(delta) {
		panic("sync: WaitGroup misuse: Add called concurrently with Wait")
	}
	if v > 0 || w == 0 {
		return
	}
	// This goroutine has set counter to 0 when waiters > 0.
	// Now there can't be concurrent mutations of state:
	// - Adds must not happen concurrently with Wait,
	// - Wait does not increment waiters if it sees counter == 0.
	// Still do a cheap sanity check to detect WaitGroup misuse.
	if *statep != state {
		panic("sync: WaitGroup misuse: Add called concurrently with Wait")
	}
	// Reset waiters count to 0, then wake every waiter.
	*statep = 0
	for ; w != 0; w-- {
		runtime_Semrelease(semap, false)
	}
}
// Done decrements the WaitGroup counter by one.
// It is equivalent to Add(-1).
func (wg *WaitGroup) Done() {
	wg.Add(-1)
}
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
	statep, semap := wg.state()
	if race.Enabled {
		_ = *statep // trigger nil deref early
		race.Disable()
	}
	// Loop until either the counter is observed at zero or we successfully
	// register ourselves as a waiter via CAS.
	for {
		state := atomic.LoadUint64(statep)
		v := int32(state >> 32) // counter
		w := uint32(state)      // waiter count
		if v == 0 {
			// Counter is 0, no need to wait.
			if race.Enabled {
				race.Enable()
				race.Acquire(unsafe.Pointer(wg))
			}
			return
		}
		// Increment waiters count.
		if atomic.CompareAndSwapUint64(statep, state, state+1) {
			if race.Enabled && w == 0 {
				// Wait must be synchronized with the first Add.
				// Need to model this as a write to race with the read in Add.
				// As a consequence, can do the write only for the first waiter,
				// otherwise concurrent Waits will race with each other.
				race.Write(unsafe.Pointer(semap))
			}
			// Block until Add's final decrement releases the semaphore.
			runtime_Semacquire(semap)
			// Add zeroes *statep before releasing waiters; a nonzero
			// value here means the WaitGroup was reused too early.
			if *statep != 0 {
				panic("sync: WaitGroup is reused before previous Wait has returned")
			}
			if race.Enabled {
				race.Enable()
				race.Acquire(unsafe.Pointer(wg))
			}
			return
		}
	}
}