Initial commit: Go 1.23 release state
115
src/sync/atomic/asm.s
Normal file
@@ -0,0 +1,115 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !race

#include "textflag.h"

TEXT ·SwapInt32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Xchg(SB)

TEXT ·SwapUint32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Xchg(SB)

TEXT ·SwapInt64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Xchg64(SB)

TEXT ·SwapUint64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Xchg64(SB)

TEXT ·SwapUintptr(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Xchguintptr(SB)

TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Cas(SB)

TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Cas(SB)

TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Casuintptr(SB)

TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Cas64(SB)

TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Cas64(SB)

TEXT ·AddInt32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Xadd(SB)

TEXT ·AddUint32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Xadd(SB)

TEXT ·AddUintptr(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Xadduintptr(SB)

TEXT ·AddInt64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Xadd64(SB)

TEXT ·AddUint64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Xadd64(SB)

TEXT ·LoadInt32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Load(SB)

TEXT ·LoadUint32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Load(SB)

TEXT ·LoadInt64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Load64(SB)

TEXT ·LoadUint64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Load64(SB)

TEXT ·LoadUintptr(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Loaduintptr(SB)

TEXT ·LoadPointer(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Loadp(SB)

TEXT ·StoreInt32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Store(SB)

TEXT ·StoreUint32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Store(SB)

TEXT ·StoreInt64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Store64(SB)

TEXT ·StoreUint64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Store64(SB)

TEXT ·StoreUintptr(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Storeuintptr(SB)

TEXT ·AndInt32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·And32(SB)

TEXT ·AndUint32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·And32(SB)

TEXT ·AndUintptr(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Anduintptr(SB)

TEXT ·AndInt64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·And64(SB)

TEXT ·AndUint64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·And64(SB)

TEXT ·OrInt32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Or32(SB)

TEXT ·OrUint32(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Or32(SB)

TEXT ·OrUintptr(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Oruintptr(SB)

TEXT ·OrInt64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Or64(SB)

TEXT ·OrUint64(SB),NOSPLIT,$0
	JMP	internal∕runtime∕atomic·Or64(SB)
3000
src/sync/atomic/atomic_test.go
Normal file
File diff suppressed because it is too large
244
src/sync/atomic/doc.go
Normal file
@@ -0,0 +1,244 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package atomic provides low-level atomic memory primitives
// useful for implementing synchronization algorithms.
//
// These functions require great care to be used correctly.
// Except for special, low-level applications, synchronization is better
// done with channels or the facilities of the [sync] package.
// Share memory by communicating;
// don't communicate by sharing memory.
//
// The swap operation, implemented by the SwapT functions, is the atomic
// equivalent of:
//
//	old = *addr
//	*addr = new
//	return old
//
// The compare-and-swap operation, implemented by the CompareAndSwapT
// functions, is the atomic equivalent of:
//
//	if *addr == old {
//		*addr = new
//		return true
//	}
//	return false
//
// The add operation, implemented by the AddT functions, is the atomic
// equivalent of:
//
//	*addr += delta
//	return *addr
//
// The load and store operations, implemented by the LoadT and StoreT
// functions, are the atomic equivalents of "return *addr" and
// "*addr = val".
//
// In the terminology of [the Go memory model], if the effect of
// an atomic operation A is observed by atomic operation B,
// then A “synchronizes before” B.
// Additionally, all the atomic operations executed in a program
// behave as though executed in some sequentially consistent order.
// This definition provides the same semantics as
// C++'s sequentially consistent atomics and Java's volatile variables.
//
// [the Go memory model]: https://go.dev/ref/mem
package atomic

import (
	"unsafe"
)

// BUG(rsc): On 386, the 64-bit functions use instructions unavailable before the Pentium MMX.
//
// On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core.
//
// On ARM, 386, and 32-bit MIPS, it is the caller's responsibility to arrange
// for 64-bit alignment of 64-bit words accessed atomically via the primitive
// atomic functions (types [Int64] and [Uint64] are automatically aligned).
// The first word in an allocated struct, array, or slice; in a global
// variable; or in a local variable (because the subject of all atomic operations
// will escape to the heap) can be relied upon to be 64-bit aligned.
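
// Illustrative sketch (a separate program, not part of this file): two ways to
// satisfy the 64-bit alignment rule above on 32-bit platforms. The counter
// structs here are assumptions made up for the example.
package main

import "sync/atomic"

// alignedByLayout keeps the uint64 as the first word of the struct, which the
// rule above guarantees to be 64-bit aligned when the struct is allocated.
type alignedByLayout struct {
	hits uint64 // updated with atomic.AddUint64
	name string
}

// alignedByType sidesteps the layout concern: atomic.Uint64 is documented
// above as automatically aligned.
type alignedByType struct {
	name string
	hits atomic.Uint64
}

func main() {
	a := &alignedByLayout{name: "a"}
	b := &alignedByType{name: "b"}
	atomic.AddUint64(&a.hits, 1)
	b.hits.Add(1)
}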

// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
// Consider using the more ergonomic and less error-prone [Int32.Swap] instead.
func SwapInt32(addr *int32, new int32) (old int32)

// SwapInt64 atomically stores new into *addr and returns the previous *addr value.
// Consider using the more ergonomic and less error-prone [Int64.Swap] instead
// (particularly if you target 32-bit platforms; see the bugs section).
func SwapInt64(addr *int64, new int64) (old int64)

// SwapUint32 atomically stores new into *addr and returns the previous *addr value.
// Consider using the more ergonomic and less error-prone [Uint32.Swap] instead.
func SwapUint32(addr *uint32, new uint32) (old uint32)

// SwapUint64 atomically stores new into *addr and returns the previous *addr value.
// Consider using the more ergonomic and less error-prone [Uint64.Swap] instead
// (particularly if you target 32-bit platforms; see the bugs section).
func SwapUint64(addr *uint64, new uint64) (old uint64)

// SwapUintptr atomically stores new into *addr and returns the previous *addr value.
// Consider using the more ergonomic and less error-prone [Uintptr.Swap] instead.
func SwapUintptr(addr *uintptr, new uintptr) (old uintptr)

// SwapPointer atomically stores new into *addr and returns the previous *addr value.
// Consider using the more ergonomic and less error-prone [Pointer.Swap] instead.
func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)

// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
// Consider using the more ergonomic and less error-prone [Int32.CompareAndSwap] instead.
func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)

// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
// Consider using the more ergonomic and less error-prone [Int64.CompareAndSwap] instead
// (particularly if you target 32-bit platforms; see the bugs section).
func CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)

// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
// Consider using the more ergonomic and less error-prone [Uint32.CompareAndSwap] instead.
func CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)

// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
// Consider using the more ergonomic and less error-prone [Uint64.CompareAndSwap] instead
// (particularly if you target 32-bit platforms; see the bugs section).
func CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)

// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
// Consider using the more ergonomic and less error-prone [Uintptr.CompareAndSwap] instead.
func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) (swapped bool)

// CompareAndSwapPointer executes the compare-and-swap operation for an unsafe.Pointer value.
// Consider using the more ergonomic and less error-prone [Pointer.CompareAndSwap] instead.
func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
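
// Illustrative sketch (a separate program, not part of this file): the retry
// loop commonly built on top of these CompareAndSwap functions. The addToTotal
// helper is an assumption made up for the example.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// addToTotal adds delta to *total without a mutex, retrying until the
// compare-and-swap succeeds against an unchanged old value.
func addToTotal(total *int64, delta int64) {
	for {
		old := atomic.LoadInt64(total)
		if atomic.CompareAndSwapInt64(total, old, old+delta) {
			return
		}
		// Another goroutine updated *total in between; reload and retry.
	}
}

func main() {
	var total int64
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				addToTotal(&total, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&total)) // 8000
}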

// AddInt32 atomically adds delta to *addr and returns the new value.
// Consider using the more ergonomic and less error-prone [Int32.Add] instead.
func AddInt32(addr *int32, delta int32) (new int32)

// AddUint32 atomically adds delta to *addr and returns the new value.
// To subtract a signed positive constant value c from x, do AddUint32(&x, ^uint32(c-1)).
// In particular, to decrement x, do AddUint32(&x, ^uint32(0)).
// Consider using the more ergonomic and less error-prone [Uint32.Add] instead.
func AddUint32(addr *uint32, delta uint32) (new uint32)
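
// Illustrative sketch (a separate program, not part of this file), checking the
// subtraction idiom documented above for AddUint32.
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var x uint32 = 10

	// Subtract c = 3: AddUint32(&x, ^uint32(c-1)).
	atomic.AddUint32(&x, ^uint32(3-1)) // x == 7

	// Decrement by one: AddUint32(&x, ^uint32(0)).
	atomic.AddUint32(&x, ^uint32(0)) // x == 6

	fmt.Println(atomic.LoadUint32(&x)) // 6
}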

// AddInt64 atomically adds delta to *addr and returns the new value.
// Consider using the more ergonomic and less error-prone [Int64.Add] instead
// (particularly if you target 32-bit platforms; see the bugs section).
func AddInt64(addr *int64, delta int64) (new int64)

// AddUint64 atomically adds delta to *addr and returns the new value.
// To subtract a signed positive constant value c from x, do AddUint64(&x, ^uint64(c-1)).
// In particular, to decrement x, do AddUint64(&x, ^uint64(0)).
// Consider using the more ergonomic and less error-prone [Uint64.Add] instead
// (particularly if you target 32-bit platforms; see the bugs section).
func AddUint64(addr *uint64, delta uint64) (new uint64)

// AddUintptr atomically adds delta to *addr and returns the new value.
// Consider using the more ergonomic and less error-prone [Uintptr.Add] instead.
func AddUintptr(addr *uintptr, delta uintptr) (new uintptr)

// AndInt32 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
// and returns the old value.
// Consider using the more ergonomic and less error-prone [Int32.And] instead.
func AndInt32(addr *int32, mask int32) (old int32)

// AndUint32 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
// and returns the old value.
// Consider using the more ergonomic and less error-prone [Uint32.And] instead.
func AndUint32(addr *uint32, mask uint32) (old uint32)

// AndInt64 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
// and returns the old value.
// Consider using the more ergonomic and less error-prone [Int64.And] instead.
func AndInt64(addr *int64, mask int64) (old int64)

// AndUint64 atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
// and returns the old value.
// Consider using the more ergonomic and less error-prone [Uint64.And] instead.
func AndUint64(addr *uint64, mask uint64) (old uint64)

// AndUintptr atomically performs a bitwise AND operation on *addr using the bitmask provided as mask
// and returns the old value.
// Consider using the more ergonomic and less error-prone [Uintptr.And] instead.
func AndUintptr(addr *uintptr, mask uintptr) (old uintptr)

// OrInt32 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
// and returns the old value.
// Consider using the more ergonomic and less error-prone [Int32.Or] instead.
func OrInt32(addr *int32, mask int32) (old int32)

// OrUint32 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
// and returns the old value.
// Consider using the more ergonomic and less error-prone [Uint32.Or] instead.
func OrUint32(addr *uint32, mask uint32) (old uint32)

// OrInt64 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
// and returns the old value.
// Consider using the more ergonomic and less error-prone [Int64.Or] instead.
func OrInt64(addr *int64, mask int64) (old int64)

// OrUint64 atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
// and returns the old value.
// Consider using the more ergonomic and less error-prone [Uint64.Or] instead.
func OrUint64(addr *uint64, mask uint64) (old uint64)

// OrUintptr atomically performs a bitwise OR operation on *addr using the bitmask provided as mask
// and returns the old value.
// Consider using the more ergonomic and less error-prone [Uintptr.Or] instead.
func OrUintptr(addr *uintptr, mask uintptr) (old uintptr)

// LoadInt32 atomically loads *addr.
// Consider using the more ergonomic and less error-prone [Int32.Load] instead.
func LoadInt32(addr *int32) (val int32)

// LoadInt64 atomically loads *addr.
// Consider using the more ergonomic and less error-prone [Int64.Load] instead
// (particularly if you target 32-bit platforms; see the bugs section).
func LoadInt64(addr *int64) (val int64)

// LoadUint32 atomically loads *addr.
// Consider using the more ergonomic and less error-prone [Uint32.Load] instead.
func LoadUint32(addr *uint32) (val uint32)

// LoadUint64 atomically loads *addr.
// Consider using the more ergonomic and less error-prone [Uint64.Load] instead
// (particularly if you target 32-bit platforms; see the bugs section).
func LoadUint64(addr *uint64) (val uint64)

// LoadUintptr atomically loads *addr.
// Consider using the more ergonomic and less error-prone [Uintptr.Load] instead.
func LoadUintptr(addr *uintptr) (val uintptr)

// LoadPointer atomically loads *addr.
// Consider using the more ergonomic and less error-prone [Pointer.Load] instead.
func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)

// StoreInt32 atomically stores val into *addr.
// Consider using the more ergonomic and less error-prone [Int32.Store] instead.
func StoreInt32(addr *int32, val int32)

// StoreInt64 atomically stores val into *addr.
// Consider using the more ergonomic and less error-prone [Int64.Store] instead
// (particularly if you target 32-bit platforms; see the bugs section).
func StoreInt64(addr *int64, val int64)

// StoreUint32 atomically stores val into *addr.
// Consider using the more ergonomic and less error-prone [Uint32.Store] instead.
func StoreUint32(addr *uint32, val uint32)

// StoreUint64 atomically stores val into *addr.
// Consider using the more ergonomic and less error-prone [Uint64.Store] instead
// (particularly if you target 32-bit platforms; see the bugs section).
func StoreUint64(addr *uint64, val uint64)

// StoreUintptr atomically stores val into *addr.
// Consider using the more ergonomic and less error-prone [Uintptr.Store] instead.
func StoreUintptr(addr *uintptr, val uintptr)

// StorePointer atomically stores val into *addr.
// Consider using the more ergonomic and less error-prone [Pointer.Store] instead.
func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer)
76
src/sync/atomic/example_test.go
Normal file
@@ -0,0 +1,76 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package atomic_test

import (
	"sync"
	"sync/atomic"
	"time"
)

func loadConfig() map[string]string {
	return make(map[string]string)
}

func requests() chan int {
	return make(chan int)
}

// The following example shows how to use Value for periodic program config updates
// and propagation of the changes to worker goroutines.
func ExampleValue_config() {
	var config atomic.Value // holds current server configuration
	// Create initial config value and store into config.
	config.Store(loadConfig())
	go func() {
		// Reload config every 10 seconds
		// and update config value with the new version.
		for {
			time.Sleep(10 * time.Second)
			config.Store(loadConfig())
		}
	}()
	// Create worker goroutines that handle incoming requests
	// using the latest config value.
	for i := 0; i < 10; i++ {
		go func() {
			for r := range requests() {
				c := config.Load()
				// Handle request r using config c.
				_, _ = r, c
			}
		}()
	}
}

// The following example shows how to maintain a scalable frequently read,
// but infrequently updated data structure using copy-on-write idiom.
func ExampleValue_readMostly() {
	type Map map[string]string
	var m atomic.Value
	m.Store(make(Map))
	var mu sync.Mutex // used only by writers
	// read function can be used to read the data without further synchronization
	read := func(key string) (val string) {
		m1 := m.Load().(Map)
		return m1[key]
	}
	// insert function can be used to update the data without further synchronization
	insert := func(key, val string) {
		mu.Lock() // synchronize with other potential writers
		defer mu.Unlock()
		m1 := m.Load().(Map) // load current value of the data structure
		m2 := make(Map)      // create a new value
		for k, v := range m1 {
			m2[k] = v // copy all data from the current object to the new one
		}
		m2[key] = val // do the update that we need
		m.Store(m2)   // atomically replace the current object with the new one
		// At this point all new readers start working with the new version.
		// The old version will be garbage collected once the existing readers
		// (if any) are done with it.
	}
	_, _ = read, insert
}
8
src/sync/atomic/race.s
Normal file
@@ -0,0 +1,8 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build race

// This file is here only to allow external functions.
// The operations are implemented in src/runtime/race_amd64.s
240
src/sync/atomic/type.go
Normal file
@@ -0,0 +1,240 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package atomic

import "unsafe"

// A Bool is an atomic boolean value.
// The zero value is false.
type Bool struct {
	_ noCopy
	v uint32
}

// Load atomically loads and returns the value stored in x.
func (x *Bool) Load() bool { return LoadUint32(&x.v) != 0 }

// Store atomically stores val into x.
func (x *Bool) Store(val bool) { StoreUint32(&x.v, b32(val)) }

// Swap atomically stores new into x and returns the previous value.
func (x *Bool) Swap(new bool) (old bool) { return SwapUint32(&x.v, b32(new)) != 0 }

// CompareAndSwap executes the compare-and-swap operation for the boolean value x.
func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
	return CompareAndSwapUint32(&x.v, b32(old), b32(new))
}

// b32 returns a uint32 0 or 1 representing b.
func b32(b bool) uint32 {
	if b {
		return 1
	}
	return 0
}
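
// Illustrative sketch (a separate program, not part of this file): Bool used as
// a one-shot flag, with CompareAndSwap picking exactly one winner.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var closed atomic.Bool
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Only the first successful CompareAndSwap observes false -> true.
			if closed.CompareAndSwap(false, true) {
				fmt.Println("closed once")
			}
		}()
	}
	wg.Wait()
	fmt.Println(closed.Load()) // true
}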

// For testing *Pointer[T]'s methods can be inlined.
// Keep in sync with cmd/compile/internal/test/inl_test.go:TestIntendedInlining.
var _ = &Pointer[int]{}

// A Pointer is an atomic pointer of type *T. The zero value is a nil *T.
type Pointer[T any] struct {
	// Mention *T in a field to disallow conversion between Pointer types.
	// See go.dev/issue/56603 for more details.
	// Use *T, not T, to avoid spurious recursive type definition errors.
	_ [0]*T

	_ noCopy
	v unsafe.Pointer
}

// Load atomically loads and returns the value stored in x.
func (x *Pointer[T]) Load() *T { return (*T)(LoadPointer(&x.v)) }

// Store atomically stores val into x.
func (x *Pointer[T]) Store(val *T) { StorePointer(&x.v, unsafe.Pointer(val)) }

// Swap atomically stores new into x and returns the previous value.
func (x *Pointer[T]) Swap(new *T) (old *T) { return (*T)(SwapPointer(&x.v, unsafe.Pointer(new))) }

// CompareAndSwap executes the compare-and-swap operation for x.
func (x *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
	return CompareAndSwapPointer(&x.v, unsafe.Pointer(old), unsafe.Pointer(new))
}
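
// Illustrative sketch (a separate program, not part of this file): Pointer[T]
// as a typed alternative to Value for hot-swapping configuration. The Config
// type is an assumption made up for the example.
package main

import (
	"fmt"
	"sync/atomic"
)

type Config struct {
	Addr string
}

func main() {
	var cfg atomic.Pointer[Config]
	cfg.Store(&Config{Addr: "localhost:8080"})

	// Readers load a consistent *Config without locking.
	fmt.Println(cfg.Load().Addr)

	// A writer swaps in a new value; readers holding the old pointer are unaffected.
	old := cfg.Swap(&Config{Addr: "localhost:9090"})
	fmt.Println(old.Addr, "->", cfg.Load().Addr)
}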

// An Int32 is an atomic int32. The zero value is zero.
type Int32 struct {
	_ noCopy
	v int32
}

// Load atomically loads and returns the value stored in x.
func (x *Int32) Load() int32 { return LoadInt32(&x.v) }

// Store atomically stores val into x.
func (x *Int32) Store(val int32) { StoreInt32(&x.v, val) }

// Swap atomically stores new into x and returns the previous value.
func (x *Int32) Swap(new int32) (old int32) { return SwapInt32(&x.v, new) }

// CompareAndSwap executes the compare-and-swap operation for x.
func (x *Int32) CompareAndSwap(old, new int32) (swapped bool) {
	return CompareAndSwapInt32(&x.v, old, new)
}

// Add atomically adds delta to x and returns the new value.
func (x *Int32) Add(delta int32) (new int32) { return AddInt32(&x.v, delta) }

// And atomically performs a bitwise AND operation on x using the bitmask
// provided as mask and returns the old value.
func (x *Int32) And(mask int32) (old int32) { return AndInt32(&x.v, mask) }

// Or atomically performs a bitwise OR operation on x using the bitmask
// provided as mask and returns the old value.
func (x *Int32) Or(mask int32) (old int32) { return OrInt32(&x.v, mask) }

// An Int64 is an atomic int64. The zero value is zero.
type Int64 struct {
	_ noCopy
	_ align64
	v int64
}

// Load atomically loads and returns the value stored in x.
func (x *Int64) Load() int64 { return LoadInt64(&x.v) }

// Store atomically stores val into x.
func (x *Int64) Store(val int64) { StoreInt64(&x.v, val) }

// Swap atomically stores new into x and returns the previous value.
func (x *Int64) Swap(new int64) (old int64) { return SwapInt64(&x.v, new) }

// CompareAndSwap executes the compare-and-swap operation for x.
func (x *Int64) CompareAndSwap(old, new int64) (swapped bool) {
	return CompareAndSwapInt64(&x.v, old, new)
}

// Add atomically adds delta to x and returns the new value.
func (x *Int64) Add(delta int64) (new int64) { return AddInt64(&x.v, delta) }

// And atomically performs a bitwise AND operation on x using the bitmask
// provided as mask and returns the old value.
func (x *Int64) And(mask int64) (old int64) { return AndInt64(&x.v, mask) }

// Or atomically performs a bitwise OR operation on x using the bitmask
// provided as mask and returns the old value.
func (x *Int64) Or(mask int64) (old int64) { return OrInt64(&x.v, mask) }

// A Uint32 is an atomic uint32. The zero value is zero.
type Uint32 struct {
	_ noCopy
	v uint32
}

// Load atomically loads and returns the value stored in x.
func (x *Uint32) Load() uint32 { return LoadUint32(&x.v) }

// Store atomically stores val into x.
func (x *Uint32) Store(val uint32) { StoreUint32(&x.v, val) }

// Swap atomically stores new into x and returns the previous value.
func (x *Uint32) Swap(new uint32) (old uint32) { return SwapUint32(&x.v, new) }

// CompareAndSwap executes the compare-and-swap operation for x.
func (x *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
	return CompareAndSwapUint32(&x.v, old, new)
}

// Add atomically adds delta to x and returns the new value.
func (x *Uint32) Add(delta uint32) (new uint32) { return AddUint32(&x.v, delta) }

// And atomically performs a bitwise AND operation on x using the bitmask
// provided as mask and returns the old value.
func (x *Uint32) And(mask uint32) (old uint32) { return AndUint32(&x.v, mask) }

// Or atomically performs a bitwise OR operation on x using the bitmask
// provided as mask and returns the old value.
func (x *Uint32) Or(mask uint32) (old uint32) { return OrUint32(&x.v, mask) }

// A Uint64 is an atomic uint64. The zero value is zero.
type Uint64 struct {
	_ noCopy
	_ align64
	v uint64
}

// Load atomically loads and returns the value stored in x.
func (x *Uint64) Load() uint64 { return LoadUint64(&x.v) }

// Store atomically stores val into x.
func (x *Uint64) Store(val uint64) { StoreUint64(&x.v, val) }

// Swap atomically stores new into x and returns the previous value.
func (x *Uint64) Swap(new uint64) (old uint64) { return SwapUint64(&x.v, new) }

// CompareAndSwap executes the compare-and-swap operation for x.
func (x *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
	return CompareAndSwapUint64(&x.v, old, new)
}

// Add atomically adds delta to x and returns the new value.
func (x *Uint64) Add(delta uint64) (new uint64) { return AddUint64(&x.v, delta) }

// And atomically performs a bitwise AND operation on x using the bitmask
// provided as mask and returns the old value.
func (x *Uint64) And(mask uint64) (old uint64) { return AndUint64(&x.v, mask) }

// Or atomically performs a bitwise OR operation on x using the bitmask
// provided as mask and returns the old value.
func (x *Uint64) Or(mask uint64) (old uint64) { return OrUint64(&x.v, mask) }

// A Uintptr is an atomic uintptr. The zero value is zero.
type Uintptr struct {
	_ noCopy
	v uintptr
}

// Load atomically loads and returns the value stored in x.
func (x *Uintptr) Load() uintptr { return LoadUintptr(&x.v) }

// Store atomically stores val into x.
func (x *Uintptr) Store(val uintptr) { StoreUintptr(&x.v, val) }

// Swap atomically stores new into x and returns the previous value.
func (x *Uintptr) Swap(new uintptr) (old uintptr) { return SwapUintptr(&x.v, new) }

// CompareAndSwap executes the compare-and-swap operation for x.
func (x *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
	return CompareAndSwapUintptr(&x.v, old, new)
}

// Add atomically adds delta to x and returns the new value.
func (x *Uintptr) Add(delta uintptr) (new uintptr) { return AddUintptr(&x.v, delta) }

// And atomically performs a bitwise AND operation on x using the bitmask
// provided as mask and returns the old value.
func (x *Uintptr) And(mask uintptr) (old uintptr) { return AndUintptr(&x.v, mask) }

// Or atomically performs a bitwise OR operation on x using the bitmask
// provided as mask and returns the old value.
func (x *Uintptr) Or(mask uintptr) (old uintptr) { return OrUintptr(&x.v, mask) }

// noCopy may be added to structs which must not be copied
// after the first use.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
//
// Note that it must not be embedded, due to the Lock and Unlock methods.
type noCopy struct{}

// Lock is a no-op used by -copylocks checker from `go vet`.
func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}

// align64 may be added to structs that must be 64-bit aligned.
// This struct is recognized by a special case in the compiler
// and will not work if copied to any other package.
type align64 struct{}
194
src/sync/atomic/value.go
Normal file
@@ -0,0 +1,194 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package atomic

import (
	"unsafe"
)

// A Value provides an atomic load and store of a consistently typed value.
// The zero value for a Value returns nil from [Value.Load].
// Once [Value.Store] has been called, a Value must not be copied.
//
// A Value must not be copied after first use.
type Value struct {
	v any
}

// efaceWords is interface{} internal representation.
type efaceWords struct {
	typ  unsafe.Pointer
	data unsafe.Pointer
}

// Load returns the value set by the most recent Store.
// It returns nil if there has been no call to Store for this Value.
func (v *Value) Load() (val any) {
	vp := (*efaceWords)(unsafe.Pointer(v))
	typ := LoadPointer(&vp.typ)
	if typ == nil || typ == unsafe.Pointer(&firstStoreInProgress) {
		// First store not yet completed.
		return nil
	}
	data := LoadPointer(&vp.data)
	vlp := (*efaceWords)(unsafe.Pointer(&val))
	vlp.typ = typ
	vlp.data = data
	return
}

var firstStoreInProgress byte

// Store sets the value of the [Value] v to val.
// All calls to Store for a given Value must use values of the same concrete type.
// Store of an inconsistent type panics, as does Store(nil).
func (v *Value) Store(val any) {
	if val == nil {
		panic("sync/atomic: store of nil value into Value")
	}
	vp := (*efaceWords)(unsafe.Pointer(v))
	vlp := (*efaceWords)(unsafe.Pointer(&val))
	for {
		typ := LoadPointer(&vp.typ)
		if typ == nil {
			// Attempt to start first store.
			// Disable preemption so that other goroutines can use
			// active spin wait to wait for completion.
			runtime_procPin()
			if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
				runtime_procUnpin()
				continue
			}
			// Complete first store.
			StorePointer(&vp.data, vlp.data)
			StorePointer(&vp.typ, vlp.typ)
			runtime_procUnpin()
			return
		}
		if typ == unsafe.Pointer(&firstStoreInProgress) {
			// First store in progress. Wait.
			// Since we disable preemption around the first store,
			// we can wait with active spinning.
			continue
		}
		// First store completed. Check type and overwrite data.
		if typ != vlp.typ {
			panic("sync/atomic: store of inconsistently typed value into Value")
		}
		StorePointer(&vp.data, vlp.data)
		return
	}
}

// Swap stores new into Value and returns the previous value. It returns nil if
// the Value is empty.
//
// All calls to Swap for a given Value must use values of the same concrete
// type. Swap of an inconsistent type panics, as does Swap(nil).
func (v *Value) Swap(new any) (old any) {
	if new == nil {
		panic("sync/atomic: swap of nil value into Value")
	}
	vp := (*efaceWords)(unsafe.Pointer(v))
	np := (*efaceWords)(unsafe.Pointer(&new))
	for {
		typ := LoadPointer(&vp.typ)
		if typ == nil {
			// Attempt to start first store.
			// Disable preemption so that other goroutines can use
			// active spin wait to wait for completion; and so that
			// GC does not see the fake type accidentally.
			runtime_procPin()
			if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
				runtime_procUnpin()
				continue
			}
			// Complete first store.
			StorePointer(&vp.data, np.data)
			StorePointer(&vp.typ, np.typ)
			runtime_procUnpin()
			return nil
		}
		if typ == unsafe.Pointer(&firstStoreInProgress) {
			// First store in progress. Wait.
			// Since we disable preemption around the first store,
			// we can wait with active spinning.
			continue
		}
		// First store completed. Check type and overwrite data.
		if typ != np.typ {
			panic("sync/atomic: swap of inconsistently typed value into Value")
		}
		op := (*efaceWords)(unsafe.Pointer(&old))
		op.typ, op.data = np.typ, SwapPointer(&vp.data, np.data)
		return old
	}
}

// CompareAndSwap executes the compare-and-swap operation for the [Value].
//
// All calls to CompareAndSwap for a given Value must use values of the same
// concrete type. CompareAndSwap of an inconsistent type panics, as does
// CompareAndSwap(old, nil).
func (v *Value) CompareAndSwap(old, new any) (swapped bool) {
	if new == nil {
		panic("sync/atomic: compare and swap of nil value into Value")
	}
	vp := (*efaceWords)(unsafe.Pointer(v))
	np := (*efaceWords)(unsafe.Pointer(&new))
	op := (*efaceWords)(unsafe.Pointer(&old))
	if op.typ != nil && np.typ != op.typ {
		panic("sync/atomic: compare and swap of inconsistently typed values")
	}
	for {
		typ := LoadPointer(&vp.typ)
		if typ == nil {
			if old != nil {
				return false
			}
			// Attempt to start first store.
			// Disable preemption so that other goroutines can use
			// active spin wait to wait for completion; and so that
			// GC does not see the fake type accidentally.
			runtime_procPin()
			if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
				runtime_procUnpin()
				continue
			}
			// Complete first store.
			StorePointer(&vp.data, np.data)
			StorePointer(&vp.typ, np.typ)
			runtime_procUnpin()
			return true
		}
		if typ == unsafe.Pointer(&firstStoreInProgress) {
			// First store in progress. Wait.
			// Since we disable preemption around the first store,
			// we can wait with active spinning.
			continue
		}
		// First store completed. Check type and overwrite data.
		if typ != np.typ {
			panic("sync/atomic: compare and swap of inconsistently typed value into Value")
		}
		// Compare old and current via runtime equality check.
		// This allows value types to be compared, something
		// not offered by the package functions.
		// CompareAndSwapPointer below only ensures vp.data
		// has not changed since LoadPointer.
		data := LoadPointer(&vp.data)
		var i any
		(*efaceWords)(unsafe.Pointer(&i)).typ = typ
		(*efaceWords)(unsafe.Pointer(&i)).data = data
		if i != old {
			return false
		}
		return CompareAndSwapPointer(&vp.data, data, np.data)
	}
}
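
// Illustrative sketch (a separate program, not part of this file): because the
// comparison above uses runtime equality rather than pointer identity,
// comparable value types such as strings work directly with Value.CompareAndSwap.
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var state atomic.Value
	state.Store("draft")

	fmt.Println(state.CompareAndSwap("draft", "published")) // true
	fmt.Println(state.CompareAndSwap("draft", "archived"))  // false
	fmt.Println(state.Load())                               // published
}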

// Disable/enable preemption, implemented in runtime.
func runtime_procPin() int
func runtime_procUnpin()
274
src/sync/atomic/value_test.go
Normal file
@@ -0,0 +1,274 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package atomic_test

import (
	"math/rand"
	"runtime"
	"strconv"
	"sync"
	"sync/atomic"
	. "sync/atomic"
	"testing"
)

func TestValue(t *testing.T) {
	var v Value
	if v.Load() != nil {
		t.Fatal("initial Value is not nil")
	}
	v.Store(42)
	x := v.Load()
	if xx, ok := x.(int); !ok || xx != 42 {
		t.Fatalf("wrong value: got %+v, want 42", x)
	}
	v.Store(84)
	x = v.Load()
	if xx, ok := x.(int); !ok || xx != 84 {
		t.Fatalf("wrong value: got %+v, want 84", x)
	}
}

func TestValueLarge(t *testing.T) {
	var v Value
	v.Store("foo")
	x := v.Load()
	if xx, ok := x.(string); !ok || xx != "foo" {
		t.Fatalf("wrong value: got %+v, want foo", x)
	}
	v.Store("barbaz")
	x = v.Load()
	if xx, ok := x.(string); !ok || xx != "barbaz" {
		t.Fatalf("wrong value: got %+v, want barbaz", x)
	}
}

func TestValuePanic(t *testing.T) {
	const nilErr = "sync/atomic: store of nil value into Value"
	const badErr = "sync/atomic: store of inconsistently typed value into Value"
	var v Value
	func() {
		defer func() {
			err := recover()
			if err != nilErr {
				t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr)
			}
		}()
		v.Store(nil)
	}()
	v.Store(42)
	func() {
		defer func() {
			err := recover()
			if err != badErr {
				t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, badErr)
			}
		}()
		v.Store("foo")
	}()
	func() {
		defer func() {
			err := recover()
			if err != nilErr {
				t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr)
			}
		}()
		v.Store(nil)
	}()
}

func TestValueConcurrent(t *testing.T) {
	tests := [][]any{
		{uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)},
		{uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)},
		{uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)},
		{complex(0, 0), complex(1, 2), complex(3, 4), complex(5, 6)},
	}
	p := 4 * runtime.GOMAXPROCS(0)
	N := int(1e5)
	if testing.Short() {
		p /= 2
		N = 1e3
	}
	for _, test := range tests {
		var v Value
		done := make(chan bool, p)
		for i := 0; i < p; i++ {
			go func() {
				r := rand.New(rand.NewSource(rand.Int63()))
				expected := true
			loop:
				for j := 0; j < N; j++ {
					x := test[r.Intn(len(test))]
					v.Store(x)
					x = v.Load()
					for _, x1 := range test {
						if x == x1 {
							continue loop
						}
					}
					t.Logf("loaded unexpected value %+v, want %+v", x, test)
					expected = false
					break
				}
				done <- expected
			}()
		}
		for i := 0; i < p; i++ {
			if !<-done {
				t.FailNow()
			}
		}
	}
}

func BenchmarkValueRead(b *testing.B) {
	var v Value
	v.Store(new(int))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			x := v.Load().(*int)
			if *x != 0 {
				b.Fatalf("wrong value: got %v, want 0", *x)
			}
		}
	})
}

var Value_SwapTests = []struct {
	init any
	new  any
	want any
	err  any
}{
	{init: nil, new: nil, err: "sync/atomic: swap of nil value into Value"},
	{init: nil, new: true, want: nil, err: nil},
	{init: true, new: "", err: "sync/atomic: swap of inconsistently typed value into Value"},
	{init: true, new: false, want: true, err: nil},
}

func TestValue_Swap(t *testing.T) {
	for i, tt := range Value_SwapTests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			var v Value
			if tt.init != nil {
				v.Store(tt.init)
			}
			defer func() {
				err := recover()
				switch {
				case tt.err == nil && err != nil:
					t.Errorf("should not panic, got %v", err)
				case tt.err != nil && err == nil:
					t.Errorf("should panic %v, got <nil>", tt.err)
				}
			}()
			if got := v.Swap(tt.new); got != tt.want {
				t.Errorf("got %v, want %v", got, tt.want)
			}
			if got := v.Load(); got != tt.new {
				t.Errorf("got %v, want %v", got, tt.new)
			}
		})
	}
}

func TestValueSwapConcurrent(t *testing.T) {
	var v Value
	var count uint64
	var g sync.WaitGroup
	var m, n uint64 = 10000, 10000
	if testing.Short() {
		m = 1000
		n = 1000
	}
	for i := uint64(0); i < m*n; i += n {
		i := i
		g.Add(1)
		go func() {
			var c uint64
			for new := i; new < i+n; new++ {
				if old := v.Swap(new); old != nil {
					c += old.(uint64)
				}
			}
			atomic.AddUint64(&count, c)
			g.Done()
		}()
	}
	g.Wait()
	if want, got := (m*n-1)*(m*n)/2, count+v.Load().(uint64); got != want {
		t.Errorf("sum from 0 to %d was %d, want %v", m*n-1, got, want)
	}
}

var heapA, heapB = struct{ uint }{0}, struct{ uint }{0}

var Value_CompareAndSwapTests = []struct {
	init any
	new  any
	old  any
	want bool
	err  any
}{
	{init: nil, new: nil, old: nil, err: "sync/atomic: compare and swap of nil value into Value"},
	{init: nil, new: true, old: "", err: "sync/atomic: compare and swap of inconsistently typed values into Value"},
	{init: nil, new: true, old: true, want: false, err: nil},
	{init: nil, new: true, old: nil, want: true, err: nil},
	{init: true, new: "", err: "sync/atomic: compare and swap of inconsistently typed value into Value"},
	{init: true, new: true, old: false, want: false, err: nil},
	{init: true, new: true, old: true, want: true, err: nil},
	{init: heapA, new: struct{ uint }{1}, old: heapB, want: true, err: nil},
}

func TestValue_CompareAndSwap(t *testing.T) {
	for i, tt := range Value_CompareAndSwapTests {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			var v Value
			if tt.init != nil {
				v.Store(tt.init)
			}
			defer func() {
				err := recover()
				switch {
				case tt.err == nil && err != nil:
					t.Errorf("got %v, wanted no panic", err)
				case tt.err != nil && err == nil:
					t.Errorf("did not panic, want %v", tt.err)
				}
			}()
			if got := v.CompareAndSwap(tt.old, tt.new); got != tt.want {
				t.Errorf("got %v, want %v", got, tt.want)
			}
		})
	}
}

func TestValueCompareAndSwapConcurrent(t *testing.T) {
	var v Value
	var w sync.WaitGroup
	v.Store(0)
	m, n := 1000, 100
	if testing.Short() {
		m = 100
		n = 100
	}
	for i := 0; i < m; i++ {
		i := i
		w.Add(1)
		go func() {
			for j := i; j < m*n; runtime.Gosched() {
				if v.CompareAndSwap(j, j+1) {
					j += m
				}
			}
			w.Done()
		}()
	}
	w.Wait()
	if stop := v.Load().(int); stop != m*n {
		t.Errorf("did not get to %v, stopped at %v", m*n, stop)
	}
}
122
src/sync/cond.go
Normal file
@@ -0,0 +1,122 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// Cond implements a condition variable, a rendezvous point
// for goroutines waiting for or announcing the occurrence
// of an event.
//
// Each Cond has an associated Locker L (often a [*Mutex] or [*RWMutex]),
// which must be held when changing the condition and
// when calling the [Cond.Wait] method.
//
// A Cond must not be copied after first use.
//
// In the terminology of [the Go memory model], Cond arranges that
// a call to [Cond.Broadcast] or [Cond.Signal] “synchronizes before” any Wait call
// that it unblocks.
//
// For many simple use cases, users will be better off using channels than a
// Cond (Broadcast corresponds to closing a channel, and Signal corresponds to
// sending on a channel).
//
// For more on replacements for [sync.Cond], see [Roberto Clapis's series on
// advanced concurrency patterns], as well as [Bryan Mills's talk on concurrency
// patterns].
//
// [the Go memory model]: https://go.dev/ref/mem
// [Roberto Clapis's series on advanced concurrency patterns]: https://blogtitle.github.io/categories/concurrency/
// [Bryan Mills's talk on concurrency patterns]: https://drive.google.com/file/d/1nPdvhB0PutEJzdCq5ms6UI58dp50fcAN/view
type Cond struct {
	noCopy noCopy

	// L is held while observing or changing the condition
	L Locker

	notify  notifyList
	checker copyChecker
}

// NewCond returns a new Cond with Locker l.
func NewCond(l Locker) *Cond {
	return &Cond{L: l}
}

// Wait atomically unlocks c.L and suspends execution
// of the calling goroutine. After later resuming execution,
// Wait locks c.L before returning. Unlike in other systems,
// Wait cannot return unless awoken by [Cond.Broadcast] or [Cond.Signal].
//
// Because c.L is not locked while Wait is waiting, the caller
// typically cannot assume that the condition is true when
// Wait returns. Instead, the caller should Wait in a loop:
//
//	c.L.Lock()
//	for !condition() {
//		c.Wait()
//	}
//	... make use of condition ...
//	c.L.Unlock()
func (c *Cond) Wait() {
	c.checker.check()
	t := runtime_notifyListAdd(&c.notify)
	c.L.Unlock()
	runtime_notifyListWait(&c.notify, t)
	c.L.Lock()
}
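
// Illustrative sketch (a separate program, not part of this file): the
// Wait-in-a-loop pattern described above, guarding a small queue. The queue
// and channel names are assumptions made up for the example.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		mu    sync.Mutex
		cond  = sync.NewCond(&mu)
		queue []int
	)

	done := make(chan struct{})
	go func() {
		defer close(done)
		mu.Lock()
		// Re-check the condition after every wakeup; Wait releases mu while
		// blocked and reacquires it before returning.
		for len(queue) == 0 {
			cond.Wait()
		}
		fmt.Println("got", queue[0])
		mu.Unlock()
	}()

	// Change the condition under the lock, then signal the waiter.
	mu.Lock()
	queue = append(queue, 42)
	mu.Unlock()
	cond.Signal()

	<-done
}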

// Signal wakes one goroutine waiting on c, if there is any.
//
// It is allowed but not required for the caller to hold c.L
// during the call.
//
// Signal() does not affect goroutine scheduling priority; if other goroutines
// are attempting to lock c.L, they may be awoken before a "waiting" goroutine.
func (c *Cond) Signal() {
	c.checker.check()
	runtime_notifyListNotifyOne(&c.notify)
}

// Broadcast wakes all goroutines waiting on c.
//
// It is allowed but not required for the caller to hold c.L
// during the call.
func (c *Cond) Broadcast() {
	c.checker.check()
	runtime_notifyListNotifyAll(&c.notify)
}

// copyChecker holds back pointer to itself to detect object copying.
type copyChecker uintptr

func (c *copyChecker) check() {
	// Check if c has been copied in three steps:
	// 1. The first comparison is the fast-path. If c has been initialized and not copied, this will return immediately. Otherwise, c is either not initialized, or has been copied.
	// 2. Ensure c is initialized. If the CAS succeeds, we're done. If it fails, c was either initialized concurrently and we simply lost the race, or c has been copied.
	// 3. Do step 1 again. Now that c is definitely initialized, if this fails, c was copied.
	if uintptr(*c) != uintptr(unsafe.Pointer(c)) &&
		!atomic.CompareAndSwapUintptr((*uintptr)(c), 0, uintptr(unsafe.Pointer(c))) &&
		uintptr(*c) != uintptr(unsafe.Pointer(c)) {
		panic("sync.Cond is copied")
	}
}

// noCopy may be added to structs which must not be copied
// after the first use.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
//
// Note that it must not be embedded, due to the Lock and Unlock methods.
type noCopy struct{}

// Lock is a no-op used by -copylocks checker from `go vet`.
func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}
311
src/sync/cond_test.go
Normal file
@@ -0,0 +1,311 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync_test

import (
	"reflect"
	"runtime"
	. "sync"
	"testing"
)

func TestCondSignal(t *testing.T) {
	var m Mutex
	c := NewCond(&m)
	n := 2
	running := make(chan bool, n)
	awake := make(chan bool, n)
	for i := 0; i < n; i++ {
		go func() {
			m.Lock()
			running <- true
			c.Wait()
			awake <- true
			m.Unlock()
		}()
	}
	for i := 0; i < n; i++ {
		<-running // Wait for everyone to run.
	}
	for n > 0 {
		select {
		case <-awake:
			t.Fatal("goroutine not asleep")
		default:
		}
		m.Lock()
		c.Signal()
		m.Unlock()
		<-awake // Will deadlock if no goroutine wakes up
		select {
		case <-awake:
			t.Fatal("too many goroutines awake")
		default:
		}
		n--
	}
	c.Signal()
}

func TestCondSignalGenerations(t *testing.T) {
	var m Mutex
	c := NewCond(&m)
	n := 100
	running := make(chan bool, n)
	awake := make(chan int, n)
	for i := 0; i < n; i++ {
		go func(i int) {
			m.Lock()
			running <- true
			c.Wait()
			awake <- i
			m.Unlock()
		}(i)
		if i > 0 {
			a := <-awake
			if a != i-1 {
				t.Fatalf("wrong goroutine woke up: want %d, got %d", i-1, a)
			}
		}
		<-running
		m.Lock()
		c.Signal()
		m.Unlock()
	}
}

func TestCondBroadcast(t *testing.T) {
	var m Mutex
	c := NewCond(&m)
	n := 200
	running := make(chan int, n)
	awake := make(chan int, n)
	exit := false
	for i := 0; i < n; i++ {
		go func(g int) {
			m.Lock()
			for !exit {
				running <- g
				c.Wait()
				awake <- g
			}
			m.Unlock()
		}(i)
	}
	for i := 0; i < n; i++ {
		for i := 0; i < n; i++ {
			<-running // Will deadlock unless n are running.
		}
		if i == n-1 {
			m.Lock()
			exit = true
			m.Unlock()
		}
		select {
		case <-awake:
			t.Fatal("goroutine not asleep")
		default:
		}
		m.Lock()
		c.Broadcast()
		m.Unlock()
		seen := make([]bool, n)
		for i := 0; i < n; i++ {
			g := <-awake
			if seen[g] {
				t.Fatal("goroutine woke up twice")
			}
			seen[g] = true
		}
	}
	select {
	case <-running:
		t.Fatal("goroutine did not exit")
	default:
	}
	c.Broadcast()
}

func TestRace(t *testing.T) {
	x := 0
	c := NewCond(&Mutex{})
	done := make(chan bool)
	go func() {
		c.L.Lock()
		x = 1
		c.Wait()
		if x != 2 {
			t.Error("want 2")
		}
		x = 3
		c.Signal()
		c.L.Unlock()
		done <- true
	}()
	go func() {
		c.L.Lock()
		for {
			if x == 1 {
				x = 2
				c.Signal()
				break
			}
			c.L.Unlock()
			runtime.Gosched()
			c.L.Lock()
		}
		c.L.Unlock()
		done <- true
	}()
	go func() {
		c.L.Lock()
		for {
			if x == 2 {
				c.Wait()
				if x != 3 {
					t.Error("want 3")
				}
				break
			}
			if x == 3 {
				break
			}
			c.L.Unlock()
			runtime.Gosched()
			c.L.Lock()
		}
		c.L.Unlock()
		done <- true
	}()
	<-done
	<-done
	<-done
}

func TestCondSignalStealing(t *testing.T) {
	for iters := 0; iters < 1000; iters++ {
		var m Mutex
		cond := NewCond(&m)

		// Start a waiter.
		ch := make(chan struct{})
		go func() {
			m.Lock()
			ch <- struct{}{}
			cond.Wait()
			m.Unlock()

			ch <- struct{}{}
		}()

		<-ch
		m.Lock()
		m.Unlock()

		// We know that the waiter is in the cond.Wait() call because we
		// synchronized with it, then acquired/released the mutex it was
		// holding when we synchronized.
		//
		// Start two goroutines that will race: one will broadcast on
		// the cond var, the other will wait on it.
		//
		// The new waiter may or may not get notified, but the first one
		// has to be notified.
		done := false
		go func() {
			cond.Broadcast()
		}()

		go func() {
			m.Lock()
			for !done {
				cond.Wait()
			}
			m.Unlock()
		}()

		// Check that the first waiter does get signaled.
		<-ch

		// Release the second waiter in case it didn't get the
		// broadcast.
		m.Lock()
		done = true
		m.Unlock()
		cond.Broadcast()
	}
}

func TestCondCopy(t *testing.T) {
	defer func() {
		err := recover()
		if err == nil || err.(string) != "sync.Cond is copied" {
			t.Fatalf("got %v, expect sync.Cond is copied", err)
		}
	}()
	c := Cond{L: &Mutex{}}
	c.Signal()
	var c2 Cond
	reflect.ValueOf(&c2).Elem().Set(reflect.ValueOf(&c).Elem()) // c2 := c, hidden from vet
	c2.Signal()
}

func BenchmarkCond1(b *testing.B) {
	benchmarkCond(b, 1)
}

func BenchmarkCond2(b *testing.B) {
	benchmarkCond(b, 2)
}

func BenchmarkCond4(b *testing.B) {
	benchmarkCond(b, 4)
}

func BenchmarkCond8(b *testing.B) {
	benchmarkCond(b, 8)
}

func BenchmarkCond16(b *testing.B) {
	benchmarkCond(b, 16)
}

func BenchmarkCond32(b *testing.B) {
	benchmarkCond(b, 32)
}

func benchmarkCond(b *testing.B, waiters int) {
	c := NewCond(&Mutex{})
	done := make(chan bool)
	id := 0

	for routine := 0; routine < waiters+1; routine++ {
		go func() {
			for i := 0; i < b.N; i++ {
				c.L.Lock()
				if id == -1 {
					c.L.Unlock()
					break
				}
				id++
				if id == waiters+1 {
					id = 0
					c.Broadcast()
				} else {
					c.Wait()
				}
				c.L.Unlock()
			}
			c.L.Lock()
			id = -1
			c.Broadcast()
			c.L.Unlock()
			done <- true
		}()
	}
	for routine := 0; routine < waiters+1; routine++ {
		<-done
	}
}
45
src/sync/example_pool_test.go
Normal file
@@ -0,0 +1,45 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() any {
|
||||
// The Pool's New function should generally only return pointer
|
||||
// types, since a pointer can be put into the return interface
|
||||
// value without an allocation:
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
}
|
||||
|
||||
// timeNow is a fake version of time.Now for tests.
|
||||
func timeNow() time.Time {
|
||||
return time.Unix(1136214245, 0)
|
||||
}
|
||||
|
||||
func Log(w io.Writer, key, val string) {
|
||||
b := bufPool.Get().(*bytes.Buffer)
|
||||
b.Reset()
|
||||
// Replace this with time.Now() in a real logger.
|
||||
b.WriteString(timeNow().UTC().Format(time.RFC3339))
|
||||
b.WriteByte(' ')
|
||||
b.WriteString(key)
|
||||
b.WriteByte('=')
|
||||
b.WriteString(val)
|
||||
w.Write(b.Bytes())
|
||||
bufPool.Put(b)
|
||||
}
|
||||
|
||||
func ExamplePool() {
|
||||
Log(os.Stdout, "path", "/search?q=flowers")
|
||||
// Output: 2006-01-02T15:04:05Z path=/search?q=flowers
|
||||
}
|
||||
113
src/sync/example_test.go
Normal file
@@ -0,0 +1,113 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type httpPkg struct{}
|
||||
|
||||
func (httpPkg) Get(url string) {}
|
||||
|
||||
var http httpPkg
|
||||
|
||||
// This example fetches several URLs concurrently,
|
||||
// using a WaitGroup to block until all the fetches are complete.
|
||||
func ExampleWaitGroup() {
|
||||
var wg sync.WaitGroup
|
||||
var urls = []string{
|
||||
"http://www.golang.org/",
|
||||
"http://www.google.com/",
|
||||
"http://www.example.com/",
|
||||
}
|
||||
for _, url := range urls {
|
||||
// Increment the WaitGroup counter.
|
||||
wg.Add(1)
|
||||
// Launch a goroutine to fetch the URL.
|
||||
go func(url string) {
|
||||
// Decrement the counter when the goroutine completes.
|
||||
defer wg.Done()
|
||||
// Fetch the URL.
|
||||
http.Get(url)
|
||||
}(url)
|
||||
}
|
||||
// Wait for all HTTP fetches to complete.
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func ExampleOnce() {
|
||||
var once sync.Once
|
||||
onceBody := func() {
|
||||
fmt.Println("Only once")
|
||||
}
|
||||
done := make(chan bool)
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
once.Do(onceBody)
|
||||
done <- true
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
<-done
|
||||
}
|
||||
// Output:
|
||||
// Only once
|
||||
}
|
||||
|
||||
// This example uses OnceValue to perform an "expensive" computation just once,
|
||||
// even when used concurrently.
|
||||
func ExampleOnceValue() {
|
||||
once := sync.OnceValue(func() int {
|
||||
sum := 0
|
||||
for i := 0; i < 1000; i++ {
|
||||
sum += i
|
||||
}
|
||||
fmt.Println("Computed once:", sum)
|
||||
return sum
|
||||
})
|
||||
done := make(chan bool)
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
const want = 499500
|
||||
got := once()
|
||||
if got != want {
|
||||
fmt.Println("want", want, "got", got)
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
<-done
|
||||
}
|
||||
// Output:
|
||||
// Computed once: 499500
|
||||
}
|
||||
|
||||
// This example uses OnceValues to read a file just once.
|
||||
func ExampleOnceValues() {
|
||||
once := sync.OnceValues(func() ([]byte, error) {
|
||||
fmt.Println("Reading file once")
|
||||
return os.ReadFile("example_test.go")
|
||||
})
|
||||
done := make(chan bool)
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
data, err := once()
|
||||
if err != nil {
|
||||
fmt.Println("error:", err)
|
||||
}
|
||||
_ = data // Ignore the data for this example
|
||||
done <- true
|
||||
}()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
<-done
|
||||
}
|
||||
// Output:
|
||||
// Reading file once
|
||||
}
|
||||
57
src/sync/export_test.go
Normal file
@@ -0,0 +1,57 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync
|
||||
|
||||
// Export for testing.
|
||||
var Runtime_Semacquire = runtime_Semacquire
|
||||
var Runtime_Semrelease = runtime_Semrelease
|
||||
var Runtime_procPin = runtime_procPin
|
||||
var Runtime_procUnpin = runtime_procUnpin
|
||||
|
||||
// PoolDequeue exports an interface for poolDequeue testing.
|
||||
type PoolDequeue interface {
|
||||
PushHead(val any) bool
|
||||
PopHead() (any, bool)
|
||||
PopTail() (any, bool)
|
||||
}
|
||||
|
||||
func NewPoolDequeue(n int) PoolDequeue {
|
||||
d := &poolDequeue{
|
||||
vals: make([]eface, n),
|
||||
}
|
||||
// For testing purposes, set the head and tail indexes close
|
||||
// to wrapping around.
|
||||
d.headTail.Store(d.pack(1<<dequeueBits-500, 1<<dequeueBits-500))
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *poolDequeue) PushHead(val any) bool {
|
||||
return d.pushHead(val)
|
||||
}
|
||||
|
||||
func (d *poolDequeue) PopHead() (any, bool) {
|
||||
return d.popHead()
|
||||
}
|
||||
|
||||
func (d *poolDequeue) PopTail() (any, bool) {
|
||||
return d.popTail()
|
||||
}
|
||||
|
||||
func NewPoolChain() PoolDequeue {
|
||||
return new(poolChain)
|
||||
}
|
||||
|
||||
func (c *poolChain) PushHead(val any) bool {
|
||||
c.pushHead(val)
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *poolChain) PopHead() (any, bool) {
|
||||
return c.popHead()
|
||||
}
|
||||
|
||||
func (c *poolChain) PopTail() (any, bool) {
|
||||
return c.popTail()
|
||||
}
|
||||
540
src/sync/map.go
Normal file
@@ -0,0 +1,540 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Map is like a Go map[any]any but is safe for concurrent use
|
||||
// by multiple goroutines without additional locking or coordination.
|
||||
// Loads, stores, and deletes run in amortized constant time.
|
||||
//
|
||||
// The Map type is specialized. Most code should use a plain Go map instead,
|
||||
// with separate locking or coordination, for better type safety and to make it
|
||||
// easier to maintain other invariants along with the map content.
|
||||
//
|
||||
// The Map type is optimized for two common use cases: (1) when the entry for a given
|
||||
// key is only ever written once but read many times, as in caches that only grow,
|
||||
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
|
||||
// sets of keys. In these two cases, use of a Map may significantly reduce lock
|
||||
// contention compared to a Go map paired with a separate [Mutex] or [RWMutex].
|
||||
//
|
||||
// The zero Map is empty and ready for use. A Map must not be copied after first use.
|
||||
//
|
||||
// In the terminology of [the Go memory model], Map arranges that a write operation
|
||||
// “synchronizes before” any read operation that observes the effect of the write, where
|
||||
// read and write operations are defined as follows.
|
||||
// [Map.Load], [Map.LoadAndDelete], [Map.LoadOrStore], [Map.Swap], [Map.CompareAndSwap],
|
||||
// and [Map.CompareAndDelete] are read operations;
|
||||
// [Map.Delete], [Map.LoadAndDelete], [Map.Store], and [Map.Swap] are write operations;
|
||||
// [Map.LoadOrStore] is a write operation when it returns loaded set to false;
|
||||
// [Map.CompareAndSwap] is a write operation when it returns swapped set to true;
|
||||
// and [Map.CompareAndDelete] is a write operation when it returns deleted set to true.
|
||||
//
|
||||
// [the Go memory model]: https://go.dev/ref/mem
|
||||
type Map struct {
|
||||
mu Mutex
|
||||
|
||||
// read contains the portion of the map's contents that are safe for
|
||||
// concurrent access (with or without mu held).
|
||||
//
|
||||
// The read field itself is always safe to load, but must only be stored with
|
||||
// mu held.
|
||||
//
|
||||
// Entries stored in read may be updated concurrently without mu, but updating
|
||||
// a previously-expunged entry requires that the entry be copied to the dirty
|
||||
// map and unexpunged with mu held.
|
||||
read atomic.Pointer[readOnly]
|
||||
|
||||
// dirty contains the portion of the map's contents that require mu to be
|
||||
// held. To ensure that the dirty map can be promoted to the read map quickly,
|
||||
// it also includes all of the non-expunged entries in the read map.
|
||||
//
|
||||
// Expunged entries are not stored in the dirty map. An expunged entry in the
|
||||
// clean map must be unexpunged and added to the dirty map before a new value
|
||||
// can be stored to it.
|
||||
//
|
||||
// If the dirty map is nil, the next write to the map will initialize it by
|
||||
// making a shallow copy of the clean map, omitting stale entries.
|
||||
dirty map[any]*entry
|
||||
|
||||
// misses counts the number of loads since the read map was last updated that
|
||||
// needed to lock mu to determine whether the key was present.
|
||||
//
|
||||
// Once enough misses have occurred to cover the cost of copying the dirty
|
||||
// map, the dirty map will be promoted to the read map (in the unamended
|
||||
// state) and the next store to the map will make a new dirty copy.
|
||||
misses int
|
||||
}
|
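// Editor's sketch (not part of the original map.go): a minimal illustration of
// use case (1) described above, a grow-only cache whose entries are written
// once and read many times. The name exampleGrowOnlyCache is hypothetical.
func exampleGrowOnlyCache(m *Map, key any, compute func() any) any {
	// Fast path: hits are served from the lock-free read map.
	if v, ok := m.Load(key); ok {
		return v
	}
	// Slow path: compute and publish. If several goroutines race on the same
	// key, LoadOrStore keeps the first stored value and returns it to everyone.
	v, _ := m.LoadOrStore(key, compute())
	return v
}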
||||
|
||||
// readOnly is an immutable struct stored atomically in the Map.read field.
|
||||
type readOnly struct {
|
||||
m map[any]*entry
|
||||
amended bool // true if the dirty map contains some key not in m.
|
||||
}
|
||||
|
||||
// expunged is an arbitrary pointer that marks entries which have been deleted
|
||||
// from the dirty map.
|
||||
var expunged = new(any)
|
||||
|
||||
// An entry is a slot in the map corresponding to a particular key.
|
||||
type entry struct {
|
||||
// p points to the interface{} value stored for the entry.
|
||||
//
|
||||
// If p == nil, the entry has been deleted, and either m.dirty == nil or
|
||||
// m.dirty[key] is e.
|
||||
//
|
||||
// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
|
||||
// is missing from m.dirty.
|
||||
//
|
||||
// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
|
||||
// != nil, in m.dirty[key].
|
||||
//
|
||||
// An entry can be deleted by atomic replacement with nil: when m.dirty is
|
||||
// next created, it will atomically replace nil with expunged and leave
|
||||
// m.dirty[key] unset.
|
||||
//
|
||||
// An entry's associated value can be updated by atomic replacement, provided
|
||||
// p != expunged. If p == expunged, an entry's associated value can be updated
|
||||
// only after first setting m.dirty[key] = e so that lookups using the dirty
|
||||
// map find the entry.
|
||||
p atomic.Pointer[any]
|
||||
}
|
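// Editor's sketch (not part of the original map.go): the entry state machine
// described above, modeled with a free-standing atomic.Pointer rather than a
// real entry. The name exampleEntryStates is hypothetical.
func exampleEntryStates() {
	var p atomic.Pointer[any]
	tombstone := new(any) // plays the role of expunged

	v := any("value")
	p.Store(&v)                      // valid: lookups through the read map succeed
	p.CompareAndSwap(&v, nil)        // deleted: nil, but possibly still present in m.dirty
	p.CompareAndSwap(nil, tombstone) // expunged: omitted when the next dirty map is built
	p.CompareAndSwap(tombstone, nil) // unexpunged: must be re-added to m.dirty before storing
}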
||||
|
||||
func newEntry(i any) *entry {
|
||||
e := &entry{}
|
||||
e.p.Store(&i)
|
||||
return e
|
||||
}
|
||||
|
||||
func (m *Map) loadReadOnly() readOnly {
|
||||
if p := m.read.Load(); p != nil {
|
||||
return *p
|
||||
}
|
||||
return readOnly{}
|
||||
}
|
||||
|
||||
// Load returns the value stored in the map for a key, or nil if no
|
||||
// value is present.
|
||||
// The ok result indicates whether value was found in the map.
|
||||
func (m *Map) Load(key any) (value any, ok bool) {
|
||||
read := m.loadReadOnly()
|
||||
e, ok := read.m[key]
|
||||
if !ok && read.amended {
|
||||
m.mu.Lock()
|
||||
// Avoid reporting a spurious miss if m.dirty got promoted while we were
|
||||
// blocked on m.mu. (If further loads of the same key will not miss, it's
|
||||
// not worth copying the dirty map for this key.)
|
||||
read = m.loadReadOnly()
|
||||
e, ok = read.m[key]
|
||||
if !ok && read.amended {
|
||||
e, ok = m.dirty[key]
|
||||
// Regardless of whether the entry was present, record a miss: this key
|
||||
// will take the slow path until the dirty map is promoted to the read
|
||||
// map.
|
||||
m.missLocked()
|
||||
}
|
||||
m.mu.Unlock()
|
||||
}
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
return e.load()
|
||||
}
|
||||
|
||||
func (e *entry) load() (value any, ok bool) {
|
||||
p := e.p.Load()
|
||||
if p == nil || p == expunged {
|
||||
return nil, false
|
||||
}
|
||||
return *p, true
|
||||
}
|
||||
|
||||
// Store sets the value for a key.
|
||||
func (m *Map) Store(key, value any) {
|
||||
_, _ = m.Swap(key, value)
|
||||
}
|
||||
|
||||
// Clear deletes all the entries, resulting in an empty Map.
|
||||
func (m *Map) Clear() {
|
||||
read := m.loadReadOnly()
|
||||
if len(read.m) == 0 && !read.amended {
|
||||
// Avoid allocating a new readOnly when the map is already clear.
|
||||
return
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
read = m.loadReadOnly()
|
||||
if len(read.m) > 0 || read.amended {
|
||||
m.read.Store(&readOnly{})
|
||||
}
|
||||
|
||||
clear(m.dirty)
|
||||
// Don't immediately promote the newly-cleared dirty map on the next operation.
|
||||
m.misses = 0
|
||||
}
|
||||
|
||||
// tryCompareAndSwap compares the entry with the given old value and swaps
|
||||
// it with a new value if the entry is equal to the old value, and the entry
|
||||
// has not been expunged.
|
||||
//
|
||||
// If the entry is expunged, tryCompareAndSwap returns false and leaves
|
||||
// the entry unchanged.
|
||||
func (e *entry) tryCompareAndSwap(old, new any) bool {
|
||||
p := e.p.Load()
|
||||
if p == nil || p == expunged || *p != old {
|
||||
return false
|
||||
}
|
||||
|
||||
// Copy the interface after the first load to make this method more amenable
|
||||
// to escape analysis: if the comparison fails from the start, we shouldn't
|
||||
// bother heap-allocating an interface value to store.
|
||||
nc := new
|
||||
for {
|
||||
if e.p.CompareAndSwap(p, &nc) {
|
||||
return true
|
||||
}
|
||||
p = e.p.Load()
|
||||
if p == nil || p == expunged || *p != old {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unexpungeLocked ensures that the entry is not marked as expunged.
|
||||
//
|
||||
// If the entry was previously expunged, it must be added to the dirty map
|
||||
// before m.mu is unlocked.
|
||||
func (e *entry) unexpungeLocked() (wasExpunged bool) {
|
||||
return e.p.CompareAndSwap(expunged, nil)
|
||||
}
|
||||
|
||||
// swapLocked unconditionally swaps a value into the entry.
|
||||
//
|
||||
// The entry must be known not to be expunged.
|
||||
func (e *entry) swapLocked(i *any) *any {
|
||||
return e.p.Swap(i)
|
||||
}
|
||||
|
||||
// LoadOrStore returns the existing value for the key if present.
|
||||
// Otherwise, it stores and returns the given value.
|
||||
// The loaded result is true if the value was loaded, false if stored.
|
||||
func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
|
||||
// Avoid locking if it's a clean hit.
|
||||
read := m.loadReadOnly()
|
||||
if e, ok := read.m[key]; ok {
|
||||
actual, loaded, ok := e.tryLoadOrStore(value)
|
||||
if ok {
|
||||
return actual, loaded
|
||||
}
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
read = m.loadReadOnly()
|
||||
if e, ok := read.m[key]; ok {
|
||||
if e.unexpungeLocked() {
|
||||
m.dirty[key] = e
|
||||
}
|
||||
actual, loaded, _ = e.tryLoadOrStore(value)
|
||||
} else if e, ok := m.dirty[key]; ok {
|
||||
actual, loaded, _ = e.tryLoadOrStore(value)
|
||||
m.missLocked()
|
||||
} else {
|
||||
if !read.amended {
|
||||
// We're adding the first new key to the dirty map.
|
||||
// Make sure it is allocated and mark the read-only map as incomplete.
|
||||
m.dirtyLocked()
|
||||
m.read.Store(&readOnly{m: read.m, amended: true})
|
||||
}
|
||||
m.dirty[key] = newEntry(value)
|
||||
actual, loaded = value, false
|
||||
}
|
||||
m.mu.Unlock()
|
||||
|
||||
return actual, loaded
|
||||
}
|
||||
|
||||
// tryLoadOrStore atomically loads or stores a value if the entry is not
|
||||
// expunged.
|
||||
//
|
||||
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
|
||||
// returns with ok==false.
|
||||
func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) {
|
||||
p := e.p.Load()
|
||||
if p == expunged {
|
||||
return nil, false, false
|
||||
}
|
||||
if p != nil {
|
||||
return *p, true, true
|
||||
}
|
||||
|
||||
// Copy the interface after the first load to make this method more amenable
|
||||
// to escape analysis: if we hit the "load" path or the entry is expunged, we
|
||||
// shouldn't bother heap-allocating.
|
||||
ic := i
|
||||
for {
|
||||
if e.p.CompareAndSwap(nil, &ic) {
|
||||
return i, false, true
|
||||
}
|
||||
p = e.p.Load()
|
||||
if p == expunged {
|
||||
return nil, false, false
|
||||
}
|
||||
if p != nil {
|
||||
return *p, true, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LoadAndDelete deletes the value for a key, returning the previous value if any.
|
||||
// The loaded result reports whether the key was present.
|
||||
func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
|
||||
read := m.loadReadOnly()
|
||||
e, ok := read.m[key]
|
||||
if !ok && read.amended {
|
||||
m.mu.Lock()
|
||||
read = m.loadReadOnly()
|
||||
e, ok = read.m[key]
|
||||
if !ok && read.amended {
|
||||
e, ok = m.dirty[key]
|
||||
delete(m.dirty, key)
|
||||
// Regardless of whether the entry was present, record a miss: this key
|
||||
// will take the slow path until the dirty map is promoted to the read
|
||||
// map.
|
||||
m.missLocked()
|
||||
}
|
||||
m.mu.Unlock()
|
||||
}
|
||||
if ok {
|
||||
return e.delete()
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Delete deletes the value for a key.
|
||||
func (m *Map) Delete(key any) {
|
||||
m.LoadAndDelete(key)
|
||||
}
|
||||
|
||||
func (e *entry) delete() (value any, ok bool) {
|
||||
for {
|
||||
p := e.p.Load()
|
||||
if p == nil || p == expunged {
|
||||
return nil, false
|
||||
}
|
||||
if e.p.CompareAndSwap(p, nil) {
|
||||
return *p, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// trySwap swaps a value if the entry has not been expunged.
|
||||
//
|
||||
// If the entry is expunged, trySwap returns false and leaves the entry
|
||||
// unchanged.
|
||||
func (e *entry) trySwap(i *any) (*any, bool) {
|
||||
for {
|
||||
p := e.p.Load()
|
||||
if p == expunged {
|
||||
return nil, false
|
||||
}
|
||||
if e.p.CompareAndSwap(p, i) {
|
||||
return p, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Swap swaps the value for a key and returns the previous value if any.
|
||||
// The loaded result reports whether the key was present.
|
||||
func (m *Map) Swap(key, value any) (previous any, loaded bool) {
|
||||
read := m.loadReadOnly()
|
||||
if e, ok := read.m[key]; ok {
|
||||
if v, ok := e.trySwap(&value); ok {
|
||||
if v == nil {
|
||||
return nil, false
|
||||
}
|
||||
return *v, true
|
||||
}
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
read = m.loadReadOnly()
|
||||
if e, ok := read.m[key]; ok {
|
||||
if e.unexpungeLocked() {
|
||||
// The entry was previously expunged, which implies that there is a
|
||||
// non-nil dirty map and this entry is not in it.
|
||||
m.dirty[key] = e
|
||||
}
|
||||
if v := e.swapLocked(&value); v != nil {
|
||||
loaded = true
|
||||
previous = *v
|
||||
}
|
||||
} else if e, ok := m.dirty[key]; ok {
|
||||
if v := e.swapLocked(&value); v != nil {
|
||||
loaded = true
|
||||
previous = *v
|
||||
}
|
||||
} else {
|
||||
if !read.amended {
|
||||
// We're adding the first new key to the dirty map.
|
||||
// Make sure it is allocated and mark the read-only map as incomplete.
|
||||
m.dirtyLocked()
|
||||
m.read.Store(&readOnly{m: read.m, amended: true})
|
||||
}
|
||||
m.dirty[key] = newEntry(value)
|
||||
}
|
||||
m.mu.Unlock()
|
||||
return previous, loaded
|
||||
}
|
||||
|
||||
// CompareAndSwap swaps the old and new values for key
|
||||
// if the value stored in the map is equal to old.
|
||||
// The old value must be of a comparable type.
|
||||
func (m *Map) CompareAndSwap(key, old, new any) (swapped bool) {
|
||||
read := m.loadReadOnly()
|
||||
if e, ok := read.m[key]; ok {
|
||||
return e.tryCompareAndSwap(old, new)
|
||||
} else if !read.amended {
|
||||
return false // No existing value for key.
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
read = m.loadReadOnly()
|
||||
swapped = false
|
||||
if e, ok := read.m[key]; ok {
|
||||
swapped = e.tryCompareAndSwap(old, new)
|
||||
} else if e, ok := m.dirty[key]; ok {
|
||||
swapped = e.tryCompareAndSwap(old, new)
|
||||
// We needed to lock mu in order to load the entry for key,
|
||||
// and the operation didn't change the set of keys in the map
|
||||
// (so it would be made more efficient by promoting the dirty
|
||||
// map to read-only).
|
||||
// Count it as a miss so that we will eventually switch to the
|
||||
// more efficient steady state.
|
||||
m.missLocked()
|
||||
}
|
||||
return swapped
|
||||
}
|
||||
|
||||
// CompareAndDelete deletes the entry for key if its value is equal to old.
|
||||
// The old value must be of a comparable type.
|
||||
//
|
||||
// If there is no current value for key in the map, CompareAndDelete
|
||||
// returns false (even if the old value is the nil interface value).
|
||||
func (m *Map) CompareAndDelete(key, old any) (deleted bool) {
|
||||
read := m.loadReadOnly()
|
||||
e, ok := read.m[key]
|
||||
if !ok && read.amended {
|
||||
m.mu.Lock()
|
||||
read = m.loadReadOnly()
|
||||
e, ok = read.m[key]
|
||||
if !ok && read.amended {
|
||||
e, ok = m.dirty[key]
|
||||
// Don't delete key from m.dirty: we still need to do the “compare” part
|
||||
// of the operation. The entry will eventually be expunged when the
|
||||
// dirty map is promoted to the read map.
|
||||
//
|
||||
// Regardless of whether the entry was present, record a miss: this key
|
||||
// will take the slow path until the dirty map is promoted to the read
|
||||
// map.
|
||||
m.missLocked()
|
||||
}
|
||||
m.mu.Unlock()
|
||||
}
|
||||
for ok {
|
||||
p := e.p.Load()
|
||||
if p == nil || p == expunged || *p != old {
|
||||
return false
|
||||
}
|
||||
if e.p.CompareAndSwap(p, nil) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Range calls f sequentially for each key and value present in the map.
|
||||
// If f returns false, range stops the iteration.
|
||||
//
|
||||
// Range does not necessarily correspond to any consistent snapshot of the Map's
|
||||
// contents: no key will be visited more than once, but if the value for any key
|
||||
// is stored or deleted concurrently (including by f), Range may reflect any
|
||||
// mapping for that key from any point during the Range call. Range does not
|
||||
// block other methods on the receiver; even f itself may call any method on m.
|
||||
//
|
||||
// Range may be O(N) with the number of elements in the map even if f returns
|
||||
// false after a constant number of calls.
|
||||
func (m *Map) Range(f func(key, value any) bool) {
|
||||
// We need to be able to iterate over all of the keys that were already
|
||||
// present at the start of the call to Range.
|
||||
// If read.amended is false, then read.m satisfies that property without
|
||||
// requiring us to hold m.mu for a long time.
|
||||
read := m.loadReadOnly()
|
||||
if read.amended {
|
||||
// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
|
||||
// (assuming the caller does not break out early), so a call to Range
|
||||
// amortizes an entire copy of the map: we can promote the dirty copy
|
||||
// immediately!
|
||||
m.mu.Lock()
|
||||
read = m.loadReadOnly()
|
||||
if read.amended {
|
||||
read = readOnly{m: m.dirty}
|
||||
copyRead := read
|
||||
m.read.Store(&copyRead)
|
||||
m.dirty = nil
|
||||
m.misses = 0
|
||||
}
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
||||
for k, e := range read.m {
|
||||
v, ok := e.load()
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if !f(k, v) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Map) missLocked() {
|
||||
m.misses++
|
||||
if m.misses < len(m.dirty) {
|
||||
return
|
||||
}
|
||||
m.read.Store(&readOnly{m: m.dirty})
|
||||
m.dirty = nil
|
||||
m.misses = 0
|
||||
}
|
||||
|
||||
func (m *Map) dirtyLocked() {
|
||||
if m.dirty != nil {
|
||||
return
|
||||
}
|
||||
|
||||
read := m.loadReadOnly()
|
||||
m.dirty = make(map[any]*entry, len(read.m))
|
||||
for k, e := range read.m {
|
||||
if !e.tryExpungeLocked() {
|
||||
m.dirty[k] = e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *entry) tryExpungeLocked() (isExpunged bool) {
|
||||
p := e.p.Load()
|
||||
for p == nil {
|
||||
if e.p.CompareAndSwap(nil, expunged) {
|
||||
return true
|
||||
}
|
||||
p = e.p.Load()
|
||||
}
|
||||
return p == expunged
|
||||
}
|
||||
547
src/sync/map_bench_test.go
Normal file
@@ -0,0 +1,547 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type bench struct {
|
||||
setup func(*testing.B, mapInterface)
|
||||
perG func(b *testing.B, pb *testing.PB, i int, m mapInterface)
|
||||
}
|
||||
|
||||
func benchMap(b *testing.B, bench bench) {
|
||||
for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &sync.Map{}} {
|
||||
b.Run(fmt.Sprintf("%T", m), func(b *testing.B) {
|
||||
m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface)
|
||||
if bench.setup != nil {
|
||||
bench.setup(b, m)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
var i int64
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
id := int(atomic.AddInt64(&i, 1) - 1)
|
||||
bench.perG(b, pb, id*b.N, m)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLoadMostlyHits(b *testing.B) {
|
||||
const hits, misses = 1023, 1
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
for i := 0; i < hits; i++ {
|
||||
m.LoadOrStore(i, i)
|
||||
}
|
||||
// Prime the map to get it into a steady state.
|
||||
for i := 0; i < hits*2; i++ {
|
||||
m.Load(i % hits)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
m.Load(i % (hits + misses))
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkLoadMostlyMisses(b *testing.B) {
|
||||
const hits, misses = 1, 1023
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
for i := 0; i < hits; i++ {
|
||||
m.LoadOrStore(i, i)
|
||||
}
|
||||
// Prime the map to get it into a steady state.
|
||||
for i := 0; i < hits*2; i++ {
|
||||
m.Load(i % hits)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
m.Load(i % (hits + misses))
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkLoadOrStoreBalanced(b *testing.B) {
|
||||
const hits, misses = 128, 128
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(b *testing.B, m mapInterface) {
|
||||
if _, ok := m.(*DeepCopyMap); ok {
|
||||
b.Skip("DeepCopyMap has quadratic running time.")
|
||||
}
|
||||
for i := 0; i < hits; i++ {
|
||||
m.LoadOrStore(i, i)
|
||||
}
|
||||
// Prime the map to get it into a steady state.
|
||||
for i := 0; i < hits*2; i++ {
|
||||
m.Load(i % hits)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
j := i % (hits + misses)
|
||||
if j < hits {
|
||||
if _, ok := m.LoadOrStore(j, i); !ok {
|
||||
b.Fatalf("unexpected miss for %v", j)
|
||||
}
|
||||
} else {
|
||||
if v, loaded := m.LoadOrStore(i, i); loaded {
|
||||
b.Fatalf("failed to store %v: existing value %v", i, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkLoadOrStoreUnique(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
setup: func(b *testing.B, m mapInterface) {
|
||||
if _, ok := m.(*DeepCopyMap); ok {
|
||||
b.Skip("DeepCopyMap has quadratic running time.")
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
m.LoadOrStore(i, i)
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkLoadOrStoreCollision(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
m.LoadOrStore(0, 0)
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
m.LoadOrStore(0, 0)
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkLoadAndDeleteBalanced(b *testing.B) {
|
||||
const hits, misses = 128, 128
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(b *testing.B, m mapInterface) {
|
||||
if _, ok := m.(*DeepCopyMap); ok {
|
||||
b.Skip("DeepCopyMap has quadratic running time.")
|
||||
}
|
||||
for i := 0; i < hits; i++ {
|
||||
m.LoadOrStore(i, i)
|
||||
}
|
||||
// Prime the map to get it into a steady state.
|
||||
for i := 0; i < hits*2; i++ {
|
||||
m.Load(i % hits)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
j := i % (hits + misses)
|
||||
if j < hits {
|
||||
m.LoadAndDelete(j)
|
||||
} else {
|
||||
m.LoadAndDelete(i)
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkLoadAndDeleteUnique(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
setup: func(b *testing.B, m mapInterface) {
|
||||
if _, ok := m.(*DeepCopyMap); ok {
|
||||
b.Skip("DeepCopyMap has quadratic running time.")
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
m.LoadAndDelete(i)
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkLoadAndDeleteCollision(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
m.LoadOrStore(0, 0)
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
if _, loaded := m.LoadAndDelete(0); loaded {
|
||||
m.Store(0, 0)
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkRange(b *testing.B) {
|
||||
const mapSize = 1 << 10
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
for i := 0; i < mapSize; i++ {
|
||||
m.Store(i, i)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
m.Range(func(_, _ any) bool { return true })
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// BenchmarkAdversarialAlloc tests performance when we store a new value
|
||||
// immediately whenever the map is promoted to clean and otherwise load a
|
||||
// unique, missing key.
|
||||
//
|
||||
// This forces the Load calls to always acquire the map's mutex.
|
||||
func BenchmarkAdversarialAlloc(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
var stores, loadsSinceStore int64
|
||||
for ; pb.Next(); i++ {
|
||||
m.Load(i)
|
||||
if loadsSinceStore++; loadsSinceStore > stores {
|
||||
m.LoadOrStore(i, stores)
|
||||
loadsSinceStore = 0
|
||||
stores++
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// BenchmarkAdversarialDelete tests performance when we periodically delete
|
||||
// one key and add a different one in a large map.
|
||||
//
|
||||
// This forces the Load calls to always acquire the map's mutex and periodically
|
||||
// makes a full copy of the map despite changing only one entry.
|
||||
func BenchmarkAdversarialDelete(b *testing.B) {
|
||||
const mapSize = 1 << 10
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
for i := 0; i < mapSize; i++ {
|
||||
m.Store(i, i)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
m.Load(i)
|
||||
|
||||
if i%mapSize == 0 {
|
||||
m.Range(func(k, _ any) bool {
|
||||
m.Delete(k)
|
||||
return false
|
||||
})
|
||||
m.Store(i, i)
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkDeleteCollision(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
m.LoadOrStore(0, 0)
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
m.Delete(0)
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkSwapCollision(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
m.LoadOrStore(0, 0)
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
m.Swap(0, 0)
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkSwapMostlyHits(b *testing.B) {
|
||||
const hits, misses = 1023, 1
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
for i := 0; i < hits; i++ {
|
||||
m.LoadOrStore(i, i)
|
||||
}
|
||||
// Prime the map to get it into a steady state.
|
||||
for i := 0; i < hits*2; i++ {
|
||||
m.Load(i % hits)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
if i%(hits+misses) < hits {
|
||||
v := i % (hits + misses)
|
||||
m.Swap(v, v)
|
||||
} else {
|
||||
m.Swap(i, i)
|
||||
m.Delete(i)
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkSwapMostlyMisses(b *testing.B) {
|
||||
const hits, misses = 1, 1023
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
for i := 0; i < hits; i++ {
|
||||
m.LoadOrStore(i, i)
|
||||
}
|
||||
// Prime the map to get it into a steady state.
|
||||
for i := 0; i < hits*2; i++ {
|
||||
m.Load(i % hits)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
if i%(hits+misses) < hits {
|
||||
v := i % (hits + misses)
|
||||
m.Swap(v, v)
|
||||
} else {
|
||||
m.Swap(i, i)
|
||||
m.Delete(i)
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkCompareAndSwapCollision(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
m.LoadOrStore(0, 0)
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for pb.Next() {
|
||||
if m.CompareAndSwap(0, 0, 42) {
|
||||
m.CompareAndSwap(0, 42, 0)
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkCompareAndSwapNoExistingKey(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
if m.CompareAndSwap(i, 0, 0) {
|
||||
m.Delete(i)
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkCompareAndSwapValueNotEqual(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
m.Store(0, 0)
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
m.CompareAndSwap(0, 1, 2)
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkCompareAndSwapMostlyHits(b *testing.B) {
|
||||
const hits, misses = 1023, 1
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(b *testing.B, m mapInterface) {
|
||||
if _, ok := m.(*DeepCopyMap); ok {
|
||||
b.Skip("DeepCopyMap has quadratic running time.")
|
||||
}
|
||||
|
||||
for i := 0; i < hits; i++ {
|
||||
m.LoadOrStore(i, i)
|
||||
}
|
||||
// Prime the map to get it into a steady state.
|
||||
for i := 0; i < hits*2; i++ {
|
||||
m.Load(i % hits)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
v := i
|
||||
if i%(hits+misses) < hits {
|
||||
v = i % (hits + misses)
|
||||
}
|
||||
m.CompareAndSwap(v, v, v)
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkCompareAndSwapMostlyMisses(b *testing.B) {
|
||||
const hits, misses = 1, 1023
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
for i := 0; i < hits; i++ {
|
||||
m.LoadOrStore(i, i)
|
||||
}
|
||||
// Prime the map to get it into a steady state.
|
||||
for i := 0; i < hits*2; i++ {
|
||||
m.Load(i % hits)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
v := i
|
||||
if i%(hits+misses) < hits {
|
||||
v = i % (hits + misses)
|
||||
}
|
||||
m.CompareAndSwap(v, v, v)
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkCompareAndDeleteCollision(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
m.LoadOrStore(0, 0)
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
if m.CompareAndDelete(0, 0) {
|
||||
m.Store(0, 0)
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkCompareAndDeleteMostlyHits(b *testing.B) {
|
||||
const hits, misses = 1023, 1
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(b *testing.B, m mapInterface) {
|
||||
if _, ok := m.(*DeepCopyMap); ok {
|
||||
b.Skip("DeepCopyMap has quadratic running time.")
|
||||
}
|
||||
|
||||
for i := 0; i < hits; i++ {
|
||||
m.LoadOrStore(i, i)
|
||||
}
|
||||
// Prime the map to get it into a steady state.
|
||||
for i := 0; i < hits*2; i++ {
|
||||
m.Load(i % hits)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
v := i
|
||||
if i%(hits+misses) < hits {
|
||||
v = i % (hits + misses)
|
||||
}
|
||||
if m.CompareAndDelete(v, v) {
|
||||
m.Store(v, v)
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkCompareAndDeleteMostlyMisses(b *testing.B) {
|
||||
const hits, misses = 1, 1023
|
||||
|
||||
benchMap(b, bench{
|
||||
setup: func(_ *testing.B, m mapInterface) {
|
||||
for i := 0; i < hits; i++ {
|
||||
m.LoadOrStore(i, i)
|
||||
}
|
||||
// Prime the map to get it into a steady state.
|
||||
for i := 0; i < hits*2; i++ {
|
||||
m.Load(i % hits)
|
||||
}
|
||||
},
|
||||
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
v := i
|
||||
if i%(hits+misses) < hits {
|
||||
v = i % (hits + misses)
|
||||
}
|
||||
if m.CompareAndDelete(v, v) {
|
||||
m.Store(v, v)
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkClear(b *testing.B) {
|
||||
benchMap(b, bench{
|
||||
perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
|
||||
for ; pb.Next(); i++ {
|
||||
k, v := i%256, i%256
|
||||
m.Clear()
|
||||
m.Store(k, v)
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
286
src/sync/map_reference_test.go
Normal file
@@ -0,0 +1,286 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// This file contains reference map implementations for unit-tests.
|
||||
|
||||
// mapInterface is the interface Map implements.
|
||||
type mapInterface interface {
|
||||
Load(key any) (value any, ok bool)
|
||||
Store(key, value any)
|
||||
LoadOrStore(key, value any) (actual any, loaded bool)
|
||||
LoadAndDelete(key any) (value any, loaded bool)
|
||||
Delete(any)
|
||||
Swap(key, value any) (previous any, loaded bool)
|
||||
CompareAndSwap(key, old, new any) (swapped bool)
|
||||
CompareAndDelete(key, old any) (deleted bool)
|
||||
Range(func(key, value any) (shouldContinue bool))
|
||||
Clear()
|
||||
}
|
||||
|
||||
var (
|
||||
_ mapInterface = &RWMutexMap{}
|
||||
_ mapInterface = &DeepCopyMap{}
|
||||
)
|
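// Editor's note (not in the original file): sync.Map itself also satisfies
// mapInterface; the equivalent compile-time assertion would be:
//
//	var _ mapInterface = &sync.Map{}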
||||
|
||||
// RWMutexMap is an implementation of mapInterface using a sync.RWMutex.
|
||||
type RWMutexMap struct {
|
||||
mu sync.RWMutex
|
||||
dirty map[any]any
|
||||
}
|
||||
|
||||
func (m *RWMutexMap) Load(key any) (value any, ok bool) {
|
||||
m.mu.RLock()
|
||||
value, ok = m.dirty[key]
|
||||
m.mu.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (m *RWMutexMap) Store(key, value any) {
|
||||
m.mu.Lock()
|
||||
if m.dirty == nil {
|
||||
m.dirty = make(map[any]any)
|
||||
}
|
||||
m.dirty[key] = value
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
||||
func (m *RWMutexMap) LoadOrStore(key, value any) (actual any, loaded bool) {
|
||||
m.mu.Lock()
|
||||
actual, loaded = m.dirty[key]
|
||||
if !loaded {
|
||||
actual = value
|
||||
if m.dirty == nil {
|
||||
m.dirty = make(map[any]any)
|
||||
}
|
||||
m.dirty[key] = value
|
||||
}
|
||||
m.mu.Unlock()
|
||||
return actual, loaded
|
||||
}
|
||||
|
||||
func (m *RWMutexMap) Swap(key, value any) (previous any, loaded bool) {
|
||||
m.mu.Lock()
|
||||
if m.dirty == nil {
|
||||
m.dirty = make(map[any]any)
|
||||
}
|
||||
|
||||
previous, loaded = m.dirty[key]
|
||||
m.dirty[key] = value
|
||||
m.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (m *RWMutexMap) LoadAndDelete(key any) (value any, loaded bool) {
|
||||
m.mu.Lock()
|
||||
value, loaded = m.dirty[key]
|
||||
if !loaded {
|
||||
m.mu.Unlock()
|
||||
return nil, false
|
||||
}
|
||||
delete(m.dirty, key)
|
||||
m.mu.Unlock()
|
||||
return value, loaded
|
||||
}
|
||||
|
||||
func (m *RWMutexMap) Delete(key any) {
|
||||
m.mu.Lock()
|
||||
delete(m.dirty, key)
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
||||
func (m *RWMutexMap) CompareAndSwap(key, old, new any) (swapped bool) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
if m.dirty == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
value, loaded := m.dirty[key]
|
||||
if loaded && value == old {
|
||||
m.dirty[key] = new
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *RWMutexMap) CompareAndDelete(key, old any) (deleted bool) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
if m.dirty == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
value, loaded := m.dirty[key]
|
||||
if loaded && value == old {
|
||||
delete(m.dirty, key)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *RWMutexMap) Range(f func(key, value any) (shouldContinue bool)) {
|
||||
m.mu.RLock()
|
||||
keys := make([]any, 0, len(m.dirty))
|
||||
for k := range m.dirty {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
m.mu.RUnlock()
|
||||
|
||||
for _, k := range keys {
|
||||
v, ok := m.Load(k)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if !f(k, v) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *RWMutexMap) Clear() {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
clear(m.dirty)
|
||||
}
|
||||
|
||||
// DeepCopyMap is an implementation of mapInterface using a Mutex and
|
||||
// atomic.Value. It makes deep copies of the map on every write to avoid
|
||||
// acquiring the Mutex in Load.
|
||||
type DeepCopyMap struct {
|
||||
mu sync.Mutex
|
||||
clean atomic.Value
|
||||
}
|
||||
|
||||
func (m *DeepCopyMap) Load(key any) (value any, ok bool) {
|
||||
clean, _ := m.clean.Load().(map[any]any)
|
||||
value, ok = clean[key]
|
||||
return value, ok
|
||||
}
|
||||
|
||||
func (m *DeepCopyMap) Store(key, value any) {
|
||||
m.mu.Lock()
|
||||
dirty := m.dirty()
|
||||
dirty[key] = value
|
||||
m.clean.Store(dirty)
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
||||
func (m *DeepCopyMap) LoadOrStore(key, value any) (actual any, loaded bool) {
|
||||
clean, _ := m.clean.Load().(map[any]any)
|
||||
actual, loaded = clean[key]
|
||||
if loaded {
|
||||
return actual, loaded
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
// Reload clean in case it changed while we were waiting on m.mu.
|
||||
clean, _ = m.clean.Load().(map[any]any)
|
||||
actual, loaded = clean[key]
|
||||
if !loaded {
|
||||
dirty := m.dirty()
|
||||
dirty[key] = value
|
||||
actual = value
|
||||
m.clean.Store(dirty)
|
||||
}
|
||||
m.mu.Unlock()
|
||||
return actual, loaded
|
||||
}
|
||||
|
||||
func (m *DeepCopyMap) Swap(key, value any) (previous any, loaded bool) {
|
||||
m.mu.Lock()
|
||||
dirty := m.dirty()
|
||||
previous, loaded = dirty[key]
|
||||
dirty[key] = value
|
||||
m.clean.Store(dirty)
|
||||
m.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (m *DeepCopyMap) LoadAndDelete(key any) (value any, loaded bool) {
|
||||
m.mu.Lock()
|
||||
dirty := m.dirty()
|
||||
value, loaded = dirty[key]
|
||||
delete(dirty, key)
|
||||
m.clean.Store(dirty)
|
||||
m.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (m *DeepCopyMap) Delete(key any) {
|
||||
m.mu.Lock()
|
||||
dirty := m.dirty()
|
||||
delete(dirty, key)
|
||||
m.clean.Store(dirty)
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
||||
func (m *DeepCopyMap) CompareAndSwap(key, old, new any) (swapped bool) {
|
||||
clean, _ := m.clean.Load().(map[any]any)
|
||||
if previous, ok := clean[key]; !ok || previous != old {
|
||||
return false
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
dirty := m.dirty()
|
||||
value, loaded := dirty[key]
|
||||
if loaded && value == old {
|
||||
dirty[key] = new
|
||||
m.clean.Store(dirty)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *DeepCopyMap) CompareAndDelete(key, old any) (deleted bool) {
|
||||
clean, _ := m.clean.Load().(map[any]any)
|
||||
if previous, ok := clean[key]; !ok || previous != old {
|
||||
return false
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
dirty := m.dirty()
|
||||
value, loaded := dirty[key]
|
||||
if loaded && value == old {
|
||||
delete(dirty, key)
|
||||
m.clean.Store(dirty)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *DeepCopyMap) Range(f func(key, value any) (shouldContinue bool)) {
|
||||
clean, _ := m.clean.Load().(map[any]any)
|
||||
for k, v := range clean {
|
||||
if !f(k, v) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *DeepCopyMap) dirty() map[any]any {
|
||||
clean, _ := m.clean.Load().(map[any]any)
|
||||
dirty := make(map[any]any, len(clean)+1)
|
||||
for k, v := range clean {
|
||||
dirty[k] = v
|
||||
}
|
||||
return dirty
|
||||
}
|
||||
|
||||
func (m *DeepCopyMap) Clear() {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
m.clean.Store((map[any]any)(nil))
|
||||
}
|
||||
359
src/sync/map_test.go
Normal file
@@ -0,0 +1,359 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
"internal/testenv"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
)
|
||||
|
||||
type mapOp string
|
||||
|
||||
const (
|
||||
opLoad = mapOp("Load")
|
||||
opStore = mapOp("Store")
|
||||
opLoadOrStore = mapOp("LoadOrStore")
|
||||
opLoadAndDelete = mapOp("LoadAndDelete")
|
||||
opDelete = mapOp("Delete")
|
||||
opSwap = mapOp("Swap")
|
||||
opCompareAndSwap = mapOp("CompareAndSwap")
|
||||
opCompareAndDelete = mapOp("CompareAndDelete")
|
||||
opClear = mapOp("Clear")
|
||||
)
|
||||
|
||||
var mapOps = [...]mapOp{
|
||||
opLoad,
|
||||
opStore,
|
||||
opLoadOrStore,
|
||||
opLoadAndDelete,
|
||||
opDelete,
|
||||
opSwap,
|
||||
opCompareAndSwap,
|
||||
opCompareAndDelete,
|
||||
opClear,
|
||||
}
|
||||
|
||||
// mapCall is a quick.Generator for calls on mapInterface.
|
||||
type mapCall struct {
|
||||
op mapOp
|
||||
k, v any
|
||||
}
|
||||
|
||||
func (c mapCall) apply(m mapInterface) (any, bool) {
|
||||
switch c.op {
|
||||
case opLoad:
|
||||
return m.Load(c.k)
|
||||
case opStore:
|
||||
m.Store(c.k, c.v)
|
||||
return nil, false
|
||||
case opLoadOrStore:
|
||||
return m.LoadOrStore(c.k, c.v)
|
||||
case opLoadAndDelete:
|
||||
return m.LoadAndDelete(c.k)
|
||||
case opDelete:
|
||||
m.Delete(c.k)
|
||||
return nil, false
|
||||
case opSwap:
|
||||
return m.Swap(c.k, c.v)
|
||||
case opCompareAndSwap:
|
||||
if m.CompareAndSwap(c.k, c.v, rand.Int()) {
|
||||
m.Delete(c.k)
|
||||
return c.v, true
|
||||
}
|
||||
return nil, false
|
||||
case opCompareAndDelete:
|
||||
if m.CompareAndDelete(c.k, c.v) {
|
||||
if _, ok := m.Load(c.k); !ok {
|
||||
return nil, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
case opClear:
|
||||
m.Clear()
|
||||
return nil, false
|
||||
default:
|
||||
panic("invalid mapOp")
|
||||
}
|
||||
}
|
||||
|
||||
type mapResult struct {
|
||||
value any
|
||||
ok bool
|
||||
}
|
||||
|
||||
func randValue(r *rand.Rand) any {
|
||||
b := make([]byte, r.Intn(4))
|
||||
for i := range b {
|
||||
b[i] = 'a' + byte(rand.Intn(26))
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (mapCall) Generate(r *rand.Rand, size int) reflect.Value {
|
||||
c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)}
|
||||
switch c.op {
|
||||
case opStore, opLoadOrStore:
|
||||
c.v = randValue(r)
|
||||
}
|
||||
return reflect.ValueOf(c)
|
||||
}
|
||||
|
||||
func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[any]any) {
|
||||
for _, c := range calls {
|
||||
v, ok := c.apply(m)
|
||||
results = append(results, mapResult{v, ok})
|
||||
}
|
||||
|
||||
final = make(map[any]any)
|
||||
m.Range(func(k, v any) bool {
|
||||
final[k] = v
|
||||
return true
|
||||
})
|
||||
|
||||
return results, final
|
||||
}
|
||||
|
||||
func applyMap(calls []mapCall) ([]mapResult, map[any]any) {
|
||||
return applyCalls(new(sync.Map), calls)
|
||||
}
|
||||
|
||||
func applyRWMutexMap(calls []mapCall) ([]mapResult, map[any]any) {
|
||||
return applyCalls(new(RWMutexMap), calls)
|
||||
}
|
||||
|
||||
func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[any]any) {
|
||||
return applyCalls(new(DeepCopyMap), calls)
|
||||
}
|
||||
|
||||
func TestMapMatchesRWMutex(t *testing.T) {
|
||||
if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapMatchesDeepCopy(t *testing.T) {
|
||||
if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConcurrentRange(t *testing.T) {
|
||||
const mapSize = 1 << 10
|
||||
|
||||
m := new(sync.Map)
|
||||
for n := int64(1); n <= mapSize; n++ {
|
||||
m.Store(n, int64(n))
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
var wg sync.WaitGroup
|
||||
defer func() {
|
||||
close(done)
|
||||
wg.Wait()
|
||||
}()
|
||||
for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- {
|
||||
r := rand.New(rand.NewSource(g))
|
||||
wg.Add(1)
|
||||
go func(g int64) {
|
||||
defer wg.Done()
|
||||
for i := int64(0); ; i++ {
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
for n := int64(1); n < mapSize; n++ {
|
||||
if r.Int63n(mapSize) == 0 {
|
||||
m.Store(n, n*i*g)
|
||||
} else {
|
||||
m.Load(n)
|
||||
}
|
||||
}
|
||||
}
|
||||
}(g)
|
||||
}
|
||||
|
||||
iters := 1 << 10
|
||||
if testing.Short() {
|
||||
iters = 16
|
||||
}
|
||||
for n := iters; n > 0; n-- {
|
||||
seen := make(map[int64]bool, mapSize)
|
||||
|
||||
m.Range(func(ki, vi any) bool {
|
||||
k, v := ki.(int64), vi.(int64)
|
||||
if v%k != 0 {
|
||||
t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v)
|
||||
}
|
||||
if seen[k] {
|
||||
t.Fatalf("Range visited key %v twice", k)
|
||||
}
|
||||
seen[k] = true
|
||||
return true
|
||||
})
|
||||
|
||||
if len(seen) != mapSize {
|
||||
t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue40999(t *testing.T) {
|
||||
var m sync.Map
|
||||
|
||||
// Since the miss-counting in missLocked (via Delete)
|
||||
// compares the miss count with len(m.dirty),
|
||||
// add an initial entry to bias len(m.dirty) above the miss count.
|
||||
m.Store(nil, struct{}{})
|
||||
|
||||
var finalized uint32
|
||||
|
||||
// Set finalizers that count for collected keys. A non-zero count
|
||||
// indicates that keys have not been leaked.
|
||||
for atomic.LoadUint32(&finalized) == 0 {
|
||||
p := new(int)
|
||||
runtime.SetFinalizer(p, func(*int) {
|
||||
atomic.AddUint32(&finalized, 1)
|
||||
})
|
||||
m.Store(p, struct{}{})
|
||||
m.Delete(p)
|
||||
runtime.GC()
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapRangeNestedCall(t *testing.T) { // Issue 46399
|
||||
var m sync.Map
|
||||
for i, v := range [3]string{"hello", "world", "Go"} {
|
||||
m.Store(i, v)
|
||||
}
|
||||
m.Range(func(key, value any) bool {
|
||||
m.Range(func(key, value any) bool {
|
||||
// We should be able to load the key offered in the Range callback,
|
||||
// because there are no concurrent Delete involved in this tested map.
|
||||
if v, ok := m.Load(key); !ok || !reflect.DeepEqual(v, value) {
|
||||
t.Fatalf("Nested Range loads unexpected value, got %+v want %+v", v, value)
|
||||
}
|
||||
|
||||
// We have not stored the key 42 in the map before, so if LoadOrStore
|
||||
// somehow reports its value as already loaded, there must be an internal
|
||||
// bug regarding nested Range in the Map.
|
||||
if _, loaded := m.LoadOrStore(42, "dummy"); loaded {
|
||||
t.Fatalf("Nested Range loads unexpected value, want store a new value")
|
||||
}
|
||||
|
||||
// Try to Store then LoadAndDelete the corresponding value with the key
|
||||
// 42 to the Map. In this case, the key 42 and associated value should be
|
||||
// removed from the Map. Therefore any future range won't observe key 42
|
||||
// as we checked in above.
|
||||
val := "sync.Map"
|
||||
m.Store(42, val)
|
||||
if v, loaded := m.LoadAndDelete(42); !loaded || !reflect.DeepEqual(v, val) {
|
||||
t.Fatalf("Nested Range loads unexpected value, got %v, want %v", v, val)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
// Remove key from Map on-the-fly.
|
||||
m.Delete(key)
|
||||
return true
|
||||
})
|
||||
|
||||
// After a Range of Delete, all keys should be removed and any
|
||||
// further Range won't invoke the callback. Hence length remains 0.
|
||||
length := 0
|
||||
m.Range(func(key, value any) bool {
|
||||
length++
|
||||
return true
|
||||
})
|
||||
|
||||
if length != 0 {
|
||||
t.Fatalf("Unexpected sync.Map size, got %v want %v", length, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareAndSwap_NonExistingKey(t *testing.T) {
|
||||
m := &sync.Map{}
|
||||
if m.CompareAndSwap(m, nil, 42) {
|
||||
// See https://go.dev/issue/51972#issuecomment-1126408637.
|
||||
t.Fatalf("CompareAndSwap on a non-existing key succeeded")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapRangeNoAllocations(t *testing.T) { // Issue 62404
|
||||
testenv.SkipIfOptimizationOff(t)
|
||||
var m sync.Map
|
||||
allocs := testing.AllocsPerRun(10, func() {
|
||||
m.Range(func(key, value any) bool {
|
||||
return true
|
||||
})
|
||||
})
|
||||
if allocs > 0 {
|
||||
t.Errorf("AllocsPerRun of m.Range = %v; want 0", allocs)
|
||||
}
|
||||
}
|
||||
|
||||
// TestConcurrentClear exercises sync.Map concurrently to ensure there are no data races
// and that Clear, Store, and Load operations are properly synchronized.
|
||||
func TestConcurrentClear(t *testing.T) {
|
||||
var m sync.Map
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(30) // 10 goroutines for writing, 10 goroutines for reading, 10 goroutines for clearing
|
||||
|
||||
// Writing data to the map concurrently
|
||||
for i := 0; i < 10; i++ {
|
||||
go func(k, v int) {
|
||||
defer wg.Done()
|
||||
m.Store(k, v)
|
||||
}(i, i*10)
|
||||
}
|
||||
|
||||
// Reading data from the map concurrently
|
||||
for i := 0; i < 10; i++ {
|
||||
go func(k int) {
|
||||
defer wg.Done()
|
||||
if value, ok := m.Load(k); ok {
|
||||
t.Logf("Key: %v, Value: %v\n", k, value)
|
||||
} else {
|
||||
t.Logf("Key: %v not found\n", k)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Clearing data from the map concurrently
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
m.Clear()
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
m.Clear()
|
||||
|
||||
m.Range(func(k, v any) bool {
|
||||
t.Errorf("after Clear, Map contains (%v, %v); expected to be empty", k, v)
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func TestMapClearNoAllocations(t *testing.T) {
|
||||
testenv.SkipIfOptimizationOff(t)
|
||||
var m sync.Map
|
||||
allocs := testing.AllocsPerRun(10, func() {
|
||||
m.Clear()
|
||||
})
|
||||
if allocs > 0 {
|
||||
t.Errorf("AllocsPerRun of m.Clear = %v; want 0", allocs)
|
||||
}
|
||||
}
|
||||
261
src/sync/mutex.go
Normal file
@@ -0,0 +1,261 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package sync provides basic synchronization primitives such as mutual
|
||||
// exclusion locks. Other than the [Once] and [WaitGroup] types, most are intended
|
||||
// for use by low-level library routines. Higher-level synchronization is
|
||||
// better done via channels and communication.
|
||||
//
|
||||
// Values containing the types defined in this package should not be copied.
|
||||
package sync
|
||||
|
||||
import (
|
||||
"internal/race"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Provided by runtime via linkname.
|
||||
func throw(string)
|
||||
func fatal(string)
|
||||
|
||||
// A Mutex is a mutual exclusion lock.
|
||||
// The zero value for a Mutex is an unlocked mutex.
|
||||
//
|
||||
// A Mutex must not be copied after first use.
|
||||
//
|
||||
// In the terminology of [the Go memory model],
|
||||
// the n'th call to [Mutex.Unlock] “synchronizes before” the m'th call to [Mutex.Lock]
|
||||
// for any n < m.
|
||||
// A successful call to [Mutex.TryLock] is equivalent to a call to Lock.
|
||||
// A failed call to TryLock does not establish any “synchronizes before”
|
||||
// relation at all.
|
||||
//
|
||||
// [the Go memory model]: https://go.dev/ref/mem
|
||||
type Mutex struct {
|
||||
state int32
|
||||
sema uint32
|
||||
}
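// Illustrative sketch (not from the Go source): a minimal, conventional use of
// a Mutex guarding shared state, written from a client's point of view. The
// counter type and Inc method are assumptions invented for this example, and
// imports of fmt and sync are assumed.
//
//	type counter struct {
//		mu sync.Mutex
//		n  int
//	}
//
//	// Inc locks around the update so concurrent callers never race on n.
//	func (c *counter) Inc() {
//		c.mu.Lock()
//		defer c.mu.Unlock()
//		c.n++
//	}
//
//	func main() {
//		var c counter
//		var wg sync.WaitGroup
//		for i := 0; i < 100; i++ {
//			wg.Add(1)
//			go func() {
//				defer wg.Done()
//				c.Inc()
//			}()
//		}
//		wg.Wait()
//		fmt.Println(c.n) // 100: each Unlock synchronizes before the next Lock.
//	}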
|
||||
|
||||
// A Locker represents an object that can be locked and unlocked.
|
||||
type Locker interface {
|
||||
Lock()
|
||||
Unlock()
|
||||
}
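// Illustrative sketch (not from the Go source): Locker lets code accept any
// lockable value, such as a Mutex or the read side of an RWMutex via RLocker.
// The withLock helper below is an assumption invented for this example.
//
//	func withLock(l sync.Locker, fn func()) {
//		l.Lock()
//		defer l.Unlock()
//		fn()
//	}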
|
||||
|
||||
const (
|
||||
mutexLocked = 1 << iota // mutex is locked
|
||||
mutexWoken
|
||||
mutexStarving
|
||||
mutexWaiterShift = iota
|
||||
|
||||
// Mutex fairness.
|
||||
//
|
||||
// Mutex can be in 2 modes of operations: normal and starvation.
|
||||
// In normal mode waiters are queued in FIFO order, but a woken up waiter
|
||||
// does not own the mutex and competes with new arriving goroutines over
|
||||
// the ownership. New arriving goroutines have an advantage -- they are
|
||||
// already running on CPU and there can be lots of them, so a woken up
|
||||
// waiter has good chances of losing. In such case it is queued at front
|
||||
// of the wait queue. If a waiter fails to acquire the mutex for more than 1ms,
|
||||
// it switches mutex to the starvation mode.
|
||||
//
|
||||
// In starvation mode ownership of the mutex is directly handed off from
|
||||
// the unlocking goroutine to the waiter at the front of the queue.
|
||||
// New arriving goroutines don't try to acquire the mutex even if it appears
|
||||
// to be unlocked, and don't try to spin. Instead they queue themselves at
|
||||
// the tail of the wait queue.
|
||||
//
|
||||
// If a waiter receives ownership of the mutex and sees that either
|
||||
// (1) it is the last waiter in the queue, or (2) it waited for less than 1 ms,
|
||||
// it switches mutex back to normal operation mode.
|
||||
//
|
||||
// Normal mode has considerably better performance as a goroutine can acquire
|
||||
// a mutex several times in a row even if there are blocked waiters.
|
||||
// Starvation mode is important to prevent pathological cases of tail latency.
|
||||
starvationThresholdNs = 1e6
|
||||
)
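// Illustrative sketch (not from the Go source): how the state word packs its
// fields given the constants above, as seen from inside this package. With
// mutexLocked=1, mutexWoken=2, mutexStarving=4, and mutexWaiterShift=3, the
// waiter count occupies the bits above the three flags. For example, a
// locked, starving mutex with two waiters would be:
//
//	state := int32(mutexLocked | mutexStarving | 2<<mutexWaiterShift) // 0b10101 == 21
//	locked := state&mutexLocked != 0                                  // true
//	starving := state&mutexStarving != 0                              // true
//	waiters := state >> mutexWaiterShift                              // 2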
|
||||
|
||||
// Lock locks m.
|
||||
// If the lock is already in use, the calling goroutine
|
||||
// blocks until the mutex is available.
|
||||
func (m *Mutex) Lock() {
|
||||
// Fast path: grab unlocked mutex.
|
||||
if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
|
||||
if race.Enabled {
|
||||
race.Acquire(unsafe.Pointer(m))
|
||||
}
|
||||
return
|
||||
}
|
||||
// Slow path (outlined so that the fast path can be inlined)
|
||||
m.lockSlow()
|
||||
}
|
||||
|
||||
// TryLock tries to lock m and reports whether it succeeded.
|
||||
//
|
||||
// Note that while correct uses of TryLock do exist, they are rare,
|
||||
// and use of TryLock is often a sign of a deeper problem
|
||||
// in a particular use of mutexes.
|
||||
func (m *Mutex) TryLock() bool {
|
||||
old := m.state
|
||||
if old&(mutexLocked|mutexStarving) != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// There may be a goroutine waiting for the mutex, but we are
|
||||
// running now and can try to grab the mutex before that
|
||||
// goroutine wakes up.
|
||||
if !atomic.CompareAndSwapInt32(&m.state, old, old|mutexLocked) {
|
||||
return false
|
||||
}
|
||||
|
||||
if race.Enabled {
|
||||
race.Acquire(unsafe.Pointer(m))
|
||||
}
|
||||
return true
|
||||
}
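// Illustrative sketch (not from the Go source): one of the rare patterns where
// TryLock is reasonable: skipping optional work when the lock is contended
// instead of blocking. The maybeFlush helper and flush callback are
// assumptions invented for this example.
//
//	func maybeFlush(mu *Mutex, flush func()) bool {
//		if !mu.TryLock() {
//			return false // someone else holds the lock; skip this round
//		}
//		defer mu.Unlock()
//		flush()
//		return true
//	}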
|
||||
|
||||
func (m *Mutex) lockSlow() {
|
||||
var waitStartTime int64
|
||||
starving := false
|
||||
awoke := false
|
||||
iter := 0
|
||||
old := m.state
|
||||
for {
|
||||
// Don't spin in starvation mode, ownership is handed off to waiters
|
||||
// so we won't be able to acquire the mutex anyway.
|
||||
if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
|
||||
// Active spinning makes sense.
|
||||
// Try to set mutexWoken flag to inform Unlock
|
||||
// to not wake other blocked goroutines.
|
||||
if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
|
||||
atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
|
||||
awoke = true
|
||||
}
|
||||
runtime_doSpin()
|
||||
iter++
|
||||
old = m.state
|
||||
continue
|
||||
}
|
||||
new := old
|
||||
// Don't try to acquire starving mutex, new arriving goroutines must queue.
|
||||
if old&mutexStarving == 0 {
|
||||
new |= mutexLocked
|
||||
}
|
||||
if old&(mutexLocked|mutexStarving) != 0 {
|
||||
new += 1 << mutexWaiterShift
|
||||
}
|
||||
// The current goroutine switches mutex to starvation mode.
|
||||
// But if the mutex is currently unlocked, don't do the switch.
|
||||
// Unlock expects that starving mutex has waiters, which will not
|
||||
// be true in this case.
|
||||
if starving && old&mutexLocked != 0 {
|
||||
new |= mutexStarving
|
||||
}
|
||||
if awoke {
|
||||
// The goroutine has been woken from sleep,
|
||||
// so we need to reset the flag in either case.
|
||||
if new&mutexWoken == 0 {
|
||||
throw("sync: inconsistent mutex state")
|
||||
}
|
||||
new &^= mutexWoken
|
||||
}
|
||||
if atomic.CompareAndSwapInt32(&m.state, old, new) {
|
||||
if old&(mutexLocked|mutexStarving) == 0 {
|
||||
break // locked the mutex with CAS
|
||||
}
|
||||
// If we were already waiting before, queue at the front of the queue.
|
||||
queueLifo := waitStartTime != 0
|
||||
if waitStartTime == 0 {
|
||||
waitStartTime = runtime_nanotime()
|
||||
}
|
||||
runtime_SemacquireMutex(&m.sema, queueLifo, 1)
|
||||
starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
|
||||
old = m.state
|
||||
if old&mutexStarving != 0 {
|
||||
// If this goroutine was woken and mutex is in starvation mode,
|
||||
// ownership was handed off to us but mutex is in somewhat
|
||||
// inconsistent state: mutexLocked is not set and we are still
|
||||
// accounted as waiter. Fix that.
|
||||
if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
|
||||
throw("sync: inconsistent mutex state")
|
||||
}
|
||||
delta := int32(mutexLocked - 1<<mutexWaiterShift)
|
||||
if !starving || old>>mutexWaiterShift == 1 {
|
||||
// Exit starvation mode.
|
||||
// Critical to do it here and consider wait time.
|
||||
// Starvation mode is so inefficient that two goroutines
// can run in lock-step indefinitely once they switch the mutex
// to starvation mode.
|
||||
delta -= mutexStarving
|
||||
}
|
||||
atomic.AddInt32(&m.state, delta)
|
||||
break
|
||||
}
|
||||
awoke = true
|
||||
iter = 0
|
||||
} else {
|
||||
old = m.state
|
||||
}
|
||||
}
|
||||
|
||||
if race.Enabled {
|
||||
race.Acquire(unsafe.Pointer(m))
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock unlocks m.
|
||||
// It is a run-time error if m is not locked on entry to Unlock.
|
||||
//
|
||||
// A locked [Mutex] is not associated with a particular goroutine.
|
||||
// It is allowed for one goroutine to lock a Mutex and then
|
||||
// arrange for another goroutine to unlock it.
|
||||
func (m *Mutex) Unlock() {
|
||||
if race.Enabled {
|
||||
_ = m.state
|
||||
race.Release(unsafe.Pointer(m))
|
||||
}
|
||||
|
||||
// Fast path: drop lock bit.
|
||||
new := atomic.AddInt32(&m.state, -mutexLocked)
|
||||
if new != 0 {
|
||||
// Outlined slow path to allow inlining the fast path.
|
||||
// To hide unlockSlow during tracing we skip one extra frame when tracing GoUnblock.
|
||||
m.unlockSlow(new)
|
||||
}
|
||||
}
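// Illustrative sketch (not from the Go source): as documented above, the
// goroutine that unlocks need not be the one that locked. Here a worker
// goroutine releases a mutex acquired by the caller once its work is done.
// The startExclusive helper is an assumption invented for this example;
// channels are usually a clearer way to hand off work.
//
//	func startExclusive(mu *Mutex, work func()) {
//		mu.Lock()
//		go func() {
//			defer mu.Unlock() // released by the worker goroutine
//			work()
//		}()
//	}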
|
||||
|
||||
func (m *Mutex) unlockSlow(new int32) {
|
||||
if (new+mutexLocked)&mutexLocked == 0 {
|
||||
fatal("sync: unlock of unlocked mutex")
|
||||
}
|
||||
if new&mutexStarving == 0 {
|
||||
old := new
|
||||
for {
|
||||
// If there are no waiters or a goroutine has already
|
||||
// been woken or grabbed the lock, no need to wake anyone.
|
||||
// In starvation mode ownership is directly handed off from unlocking
|
||||
// goroutine to the next waiter. We are not part of this chain,
|
||||
// since we did not observe mutexStarving when we unlocked the mutex above.
|
||||
// So get out of the way.
|
||||
if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken|mutexStarving) != 0 {
|
||||
return
|
||||
}
|
||||
// Grab the right to wake someone.
|
||||
new = (old - 1<<mutexWaiterShift) | mutexWoken
|
||||
if atomic.CompareAndSwapInt32(&m.state, old, new) {
|
||||
runtime_Semrelease(&m.sema, false, 1)
|
||||
return
|
||||
}
|
||||
old = m.state
|
||||
}
|
||||
} else {
|
||||
// Starving mode: handoff mutex ownership to the next waiter, and yield
|
||||
// our time slice so that the next waiter can start to run immediately.
|
||||
// Note: mutexLocked is not set, the waiter will set it after wakeup.
|
||||
// But mutex is still considered locked if mutexStarving is set,
|
||||
// so new coming goroutines won't acquire it.
|
||||
runtime_Semrelease(&m.sema, true, 1)
|
||||
}
|
||||
}
|
||||
335
src/sync/mutex_test.go
Normal file
@@ -0,0 +1,335 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// GOMAXPROCS=10 go test
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"internal/testenv"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
. "sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
|
||||
for i := 0; i < loops; i++ {
|
||||
Runtime_Semacquire(s)
|
||||
Runtime_Semrelease(s, false, 0)
|
||||
}
|
||||
cdone <- true
|
||||
}
|
||||
|
||||
func TestSemaphore(t *testing.T) {
|
||||
s := new(uint32)
|
||||
*s = 1
|
||||
c := make(chan bool)
|
||||
for i := 0; i < 10; i++ {
|
||||
go HammerSemaphore(s, 1000, c)
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
<-c
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUncontendedSemaphore(b *testing.B) {
|
||||
s := new(uint32)
|
||||
*s = 1
|
||||
HammerSemaphore(s, b.N, make(chan bool, 2))
|
||||
}
|
||||
|
||||
func BenchmarkContendedSemaphore(b *testing.B) {
|
||||
b.StopTimer()
|
||||
s := new(uint32)
|
||||
*s = 1
|
||||
c := make(chan bool)
|
||||
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
|
||||
b.StartTimer()
|
||||
|
||||
go HammerSemaphore(s, b.N/2, c)
|
||||
go HammerSemaphore(s, b.N/2, c)
|
||||
<-c
|
||||
<-c
|
||||
}
|
||||
|
||||
func HammerMutex(m *Mutex, loops int, cdone chan bool) {
|
||||
for i := 0; i < loops; i++ {
|
||||
if i%3 == 0 {
|
||||
if m.TryLock() {
|
||||
m.Unlock()
|
||||
}
|
||||
continue
|
||||
}
|
||||
m.Lock()
|
||||
m.Unlock()
|
||||
}
|
||||
cdone <- true
|
||||
}
|
||||
|
||||
func TestMutex(t *testing.T) {
|
||||
if n := runtime.SetMutexProfileFraction(1); n != 0 {
|
||||
t.Logf("got mutexrate %d expected 0", n)
|
||||
}
|
||||
defer runtime.SetMutexProfileFraction(0)
|
||||
|
||||
m := new(Mutex)
|
||||
|
||||
m.Lock()
|
||||
if m.TryLock() {
|
||||
t.Fatalf("TryLock succeeded with mutex locked")
|
||||
}
|
||||
m.Unlock()
|
||||
if !m.TryLock() {
|
||||
t.Fatalf("TryLock failed with mutex unlocked")
|
||||
}
|
||||
m.Unlock()
|
||||
|
||||
c := make(chan bool)
|
||||
for i := 0; i < 10; i++ {
|
||||
go HammerMutex(m, 1000, c)
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
<-c
|
||||
}
|
||||
}
|
||||
|
||||
var misuseTests = []struct {
|
||||
name string
|
||||
f func()
|
||||
}{
|
||||
{
|
||||
"Mutex.Unlock",
|
||||
func() {
|
||||
var mu Mutex
|
||||
mu.Unlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
"Mutex.Unlock2",
|
||||
func() {
|
||||
var mu Mutex
|
||||
mu.Lock()
|
||||
mu.Unlock()
|
||||
mu.Unlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
"RWMutex.Unlock",
|
||||
func() {
|
||||
var mu RWMutex
|
||||
mu.Unlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
"RWMutex.Unlock2",
|
||||
func() {
|
||||
var mu RWMutex
|
||||
mu.RLock()
|
||||
mu.Unlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
"RWMutex.Unlock3",
|
||||
func() {
|
||||
var mu RWMutex
|
||||
mu.Lock()
|
||||
mu.Unlock()
|
||||
mu.Unlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
"RWMutex.RUnlock",
|
||||
func() {
|
||||
var mu RWMutex
|
||||
mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
"RWMutex.RUnlock2",
|
||||
func() {
|
||||
var mu RWMutex
|
||||
mu.Lock()
|
||||
mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
"RWMutex.RUnlock3",
|
||||
func() {
|
||||
var mu RWMutex
|
||||
mu.RLock()
|
||||
mu.RUnlock()
|
||||
mu.RUnlock()
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
if len(os.Args) == 3 && os.Args[1] == "TESTMISUSE" {
|
||||
for _, test := range misuseTests {
|
||||
if test.name == os.Args[2] {
|
||||
func() {
|
||||
defer func() { recover() }()
|
||||
test.f()
|
||||
}()
|
||||
fmt.Printf("test completed\n")
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
fmt.Printf("unknown test\n")
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMutexMisuse(t *testing.T) {
|
||||
testenv.MustHaveExec(t)
|
||||
for _, test := range misuseTests {
|
||||
out, err := exec.Command(os.Args[0], "TESTMISUSE", test.name).CombinedOutput()
|
||||
if err == nil || !strings.Contains(string(out), "unlocked") {
|
||||
t.Errorf("%s: did not find failure with message about unlocked lock: %s\n%s\n", test.name, err, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMutexFairness(t *testing.T) {
|
||||
var mu Mutex
|
||||
stop := make(chan bool)
|
||||
defer close(stop)
|
||||
go func() {
|
||||
for {
|
||||
mu.Lock()
|
||||
time.Sleep(100 * time.Microsecond)
|
||||
mu.Unlock()
|
||||
select {
|
||||
case <-stop:
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
}()
|
||||
done := make(chan bool, 1)
|
||||
go func() {
|
||||
for i := 0; i < 10; i++ {
|
||||
time.Sleep(100 * time.Microsecond)
|
||||
mu.Lock()
|
||||
mu.Unlock()
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatalf("can't acquire Mutex in 10 seconds")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMutexUncontended(b *testing.B) {
|
||||
type PaddedMutex struct {
|
||||
Mutex
|
||||
pad [128]uint8
|
||||
}
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var mu PaddedMutex
|
||||
for pb.Next() {
|
||||
mu.Lock()
|
||||
mu.Unlock()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func benchmarkMutex(b *testing.B, slack, work bool) {
|
||||
var mu Mutex
|
||||
if slack {
|
||||
b.SetParallelism(10)
|
||||
}
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
foo := 0
|
||||
for pb.Next() {
|
||||
mu.Lock()
|
||||
mu.Unlock()
|
||||
if work {
|
||||
for i := 0; i < 100; i++ {
|
||||
foo *= 2
|
||||
foo /= 2
|
||||
}
|
||||
}
|
||||
}
|
||||
_ = foo
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMutex(b *testing.B) {
|
||||
benchmarkMutex(b, false, false)
|
||||
}
|
||||
|
||||
func BenchmarkMutexSlack(b *testing.B) {
|
||||
benchmarkMutex(b, true, false)
|
||||
}
|
||||
|
||||
func BenchmarkMutexWork(b *testing.B) {
|
||||
benchmarkMutex(b, false, true)
|
||||
}
|
||||
|
||||
func BenchmarkMutexWorkSlack(b *testing.B) {
|
||||
benchmarkMutex(b, true, true)
|
||||
}
|
||||
|
||||
func BenchmarkMutexNoSpin(b *testing.B) {
|
||||
// This benchmark models a situation where spinning in the mutex should be
// unprofitable and confirms that spinning does no harm.
// To achieve this we create an excess of goroutines, most of which do local work.
// These goroutines yield during local work, so that switching from
// a blocked goroutine to other goroutines is profitable.
// In practice, this benchmark still triggers some spinning in the mutex.
|
||||
var m Mutex
|
||||
var acc0, acc1 uint64
|
||||
b.SetParallelism(4)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
c := make(chan bool)
|
||||
var data [4 << 10]uint64
|
||||
for i := 0; pb.Next(); i++ {
|
||||
if i%4 == 0 {
|
||||
m.Lock()
|
||||
acc0 -= 100
|
||||
acc1 += 100
|
||||
m.Unlock()
|
||||
} else {
|
||||
for i := 0; i < len(data); i += 4 {
|
||||
data[i]++
|
||||
}
|
||||
// Elaborate way to say runtime.Gosched
|
||||
// that does not put the goroutine onto global runq.
|
||||
go func() {
|
||||
c <- true
|
||||
}()
|
||||
<-c
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkMutexSpin(b *testing.B) {
|
||||
// This benchmark models a situation where spinning in the mutex should be
|
||||
// profitable. To achieve this we create a goroutine per-proc.
|
||||
// These goroutines access a considerable amount of local data so that
// unnecessary rescheduling is penalized by cache misses.
|
||||
var m Mutex
|
||||
var acc0, acc1 uint64
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var data [16 << 10]uint64
|
||||
for i := 0; pb.Next(); i++ {
|
||||
m.Lock()
|
||||
acc0 -= 100
|
||||
acc1 += 100
|
||||
m.Unlock()
|
||||
for i := 0; i < len(data); i += 4 {
|
||||
data[i]++
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
78
src/sync/once.go
Normal file
@@ -0,0 +1,78 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Once is an object that will perform exactly one action.
|
||||
//
|
||||
// A Once must not be copied after first use.
|
||||
//
|
||||
// In the terminology of [the Go memory model],
|
||||
// the return from f “synchronizes before”
|
||||
// the return from any call of once.Do(f).
|
||||
//
|
||||
// [the Go memory model]: https://go.dev/ref/mem
|
||||
type Once struct {
|
||||
// done indicates whether the action has been performed.
|
||||
// It is first in the struct because it is used in the hot path.
|
||||
// The hot path is inlined at every call site.
|
||||
// Placing done first allows more compact instructions on some architectures (amd64/386),
|
||||
// and fewer instructions (to calculate offset) on other architectures.
|
||||
done atomic.Uint32
|
||||
m Mutex
|
||||
}
|
||||
|
||||
// Do calls the function f if and only if Do is being called for the
|
||||
// first time for this instance of [Once]. In other words, given
|
||||
//
|
||||
// var once Once
|
||||
//
|
||||
// if once.Do(f) is called multiple times, only the first call will invoke f,
|
||||
// even if f has a different value in each invocation. A new instance of
|
||||
// Once is required for each function to execute.
|
||||
//
|
||||
// Do is intended for initialization that must be run exactly once. Since f
|
||||
// is niladic, it may be necessary to use a function literal to capture the
|
||||
// arguments to a function to be invoked by Do:
|
||||
//
|
||||
// config.once.Do(func() { config.init(filename) })
|
||||
//
|
||||
// Because no call to Do returns until the one call to f returns, if f causes
|
||||
// Do to be called, it will deadlock.
|
||||
//
|
||||
// If f panics, Do considers it to have returned; future calls of Do return
|
||||
// without calling f.
|
||||
func (o *Once) Do(f func()) {
|
||||
// Note: Here is an incorrect implementation of Do:
|
||||
//
|
||||
// if o.done.CompareAndSwap(0, 1) {
|
||||
// f()
|
||||
// }
|
||||
//
|
||||
// Do guarantees that when it returns, f has finished.
|
||||
// This implementation would not implement that guarantee:
|
||||
// given two simultaneous calls, the winner of the cas would
|
||||
// call f, and the second would return immediately, without
|
||||
// waiting for the first's call to f to complete.
|
||||
// This is why the slow path falls back to a mutex, and why
|
||||
// the o.done.Store must be delayed until after f returns.
|
||||
|
||||
if o.done.Load() == 0 {
|
||||
// Outlined slow-path to allow inlining of the fast-path.
|
||||
o.doSlow(f)
|
||||
}
|
||||
}
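// Illustrative sketch (not from the Go source): the usual lazy-initialization
// pattern built on Do, written from a client's point of view. The config type
// and loadConfig function are assumptions invented for this example.
//
//	type config struct{ addr string }
//
//	var (
//		cfgOnce sync.Once
//		cfg     *config
//	)
//
//	// loadConfig runs the initialization at most once, no matter how many
//	// goroutines call it; all callers see the fully initialized cfg.
//	func loadConfig() *config {
//		cfgOnce.Do(func() {
//			cfg = &config{addr: "localhost:8080"}
//		})
//		return cfg
//	}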
|
||||
|
||||
func (o *Once) doSlow(f func()) {
|
||||
o.m.Lock()
|
||||
defer o.m.Unlock()
|
||||
if o.done.Load() == 0 {
|
||||
defer o.done.Store(1)
|
||||
f()
|
||||
}
|
||||
}
|
||||
68
src/sync/once_test.go
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
. "sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type one int
|
||||
|
||||
func (o *one) Increment() {
|
||||
*o++
|
||||
}
|
||||
|
||||
func run(t *testing.T, once *Once, o *one, c chan bool) {
|
||||
once.Do(func() { o.Increment() })
|
||||
if v := *o; v != 1 {
|
||||
t.Errorf("once failed inside run: %d is not 1", v)
|
||||
}
|
||||
c <- true
|
||||
}
|
||||
|
||||
func TestOnce(t *testing.T) {
|
||||
o := new(one)
|
||||
once := new(Once)
|
||||
c := make(chan bool)
|
||||
const N = 10
|
||||
for i := 0; i < N; i++ {
|
||||
go run(t, once, o, c)
|
||||
}
|
||||
for i := 0; i < N; i++ {
|
||||
<-c
|
||||
}
|
||||
if *o != 1 {
|
||||
t.Errorf("once failed outside run: %d is not 1", *o)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOncePanic(t *testing.T) {
|
||||
var once Once
|
||||
func() {
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Fatalf("Once.Do did not panic")
|
||||
}
|
||||
}()
|
||||
once.Do(func() {
|
||||
panic("failed")
|
||||
})
|
||||
}()
|
||||
|
||||
once.Do(func() {
|
||||
t.Fatalf("Once.Do called twice")
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkOnce(b *testing.B) {
|
||||
var once Once
|
||||
f := func() {}
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
once.Do(f)
|
||||
}
|
||||
})
|
||||
}
|
||||
100
src/sync/oncefunc.go
Normal file
@@ -0,0 +1,100 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync
|
||||
|
||||
// OnceFunc returns a function that invokes f only once. The returned function
|
||||
// may be called concurrently.
|
||||
//
|
||||
// If f panics, the returned function will panic with the same value on every call.
|
||||
func OnceFunc(f func()) func() {
|
||||
var (
|
||||
once Once
|
||||
valid bool
|
||||
p any
|
||||
)
|
||||
// Construct the inner closure just once to reduce costs on the fast path.
|
||||
g := func() {
|
||||
defer func() {
|
||||
p = recover()
|
||||
if !valid {
|
||||
// Re-panic immediately so on the first call the user gets a
|
||||
// complete stack trace into f.
|
||||
panic(p)
|
||||
}
|
||||
}()
|
||||
f()
|
||||
f = nil // Do not keep f alive after invoking it.
|
||||
valid = true // Set only if f does not panic.
|
||||
}
|
||||
return func() {
|
||||
once.Do(g)
|
||||
if !valid {
|
||||
panic(p)
|
||||
}
|
||||
}
|
||||
}
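// Illustrative sketch (not from the Go source): OnceFunc is convenient when
// the one-time action is fixed up front and the returned func is shared.
// The initLogger and handle names are assumptions invented for this example,
// and an import of the standard log package is assumed.
//
//	var initLogger = sync.OnceFunc(func() {
//		log.SetPrefix("app: ")
//	})
//
//	func handle() {
//		initLogger() // safe from many goroutines; the setup runs only once
//		log.Println("handling request")
//	}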
|
||||
|
||||
// OnceValue returns a function that invokes f only once and returns the value
|
||||
// returned by f. The returned function may be called concurrently.
|
||||
//
|
||||
// If f panics, the returned function will panic with the same value on every call.
|
||||
func OnceValue[T any](f func() T) func() T {
|
||||
var (
|
||||
once Once
|
||||
valid bool
|
||||
p any
|
||||
result T
|
||||
)
|
||||
g := func() {
|
||||
defer func() {
|
||||
p = recover()
|
||||
if !valid {
|
||||
panic(p)
|
||||
}
|
||||
}()
|
||||
result = f()
|
||||
f = nil
|
||||
valid = true
|
||||
}
|
||||
return func() T {
|
||||
once.Do(g)
|
||||
if !valid {
|
||||
panic(p)
|
||||
}
|
||||
return result
|
||||
}
|
||||
}
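// Illustrative sketch (not from the Go source): OnceValue caches a computed
// value; every caller gets the same result and the computation runs once.
// The hostname variable is an assumption invented for this example, and an
// import of the os package is assumed.
//
//	var hostname = sync.OnceValue(func() string {
//		h, err := os.Hostname()
//		if err != nil {
//			return "unknown"
//		}
//		return h
//	})
//
//	// hostname() may be called concurrently; os.Hostname runs at most once.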
|
||||
|
||||
// OnceValues returns a function that invokes f only once and returns the values
|
||||
// returned by f. The returned function may be called concurrently.
|
||||
//
|
||||
// If f panics, the returned function will panic with the same value on every call.
|
||||
func OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) {
|
||||
var (
|
||||
once Once
|
||||
valid bool
|
||||
p any
|
||||
r1 T1
|
||||
r2 T2
|
||||
)
|
||||
g := func() {
|
||||
defer func() {
|
||||
p = recover()
|
||||
if !valid {
|
||||
panic(p)
|
||||
}
|
||||
}()
|
||||
r1, r2 = f()
|
||||
f = nil
|
||||
valid = true
|
||||
}
|
||||
return func() (T1, T2) {
|
||||
once.Do(g)
|
||||
if !valid {
|
||||
panic(p)
|
||||
}
|
||||
return r1, r2
|
||||
}
|
||||
}
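// Illustrative sketch (not from the Go source): OnceValues fits functions
// with a (value, error) result, such as reading a file once. The
// readTemplate name and the file path are assumptions invented for this
// example, and an import of the os package is assumed.
//
//	var readTemplate = sync.OnceValues(func() ([]byte, error) {
//		return os.ReadFile("template.html")
//	})
//
//	func render() ([]byte, error) {
//		return readTemplate() // the file is read at most once; later calls reuse the result
//	}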
|
||||
315
src/sync/oncefunc_test.go
Normal file
@@ -0,0 +1,315 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
_ "unsafe"
|
||||
)
|
||||
|
||||
// We assume that the Once.Do tests have already covered parallelism.
|
||||
|
||||
func TestOnceFunc(t *testing.T) {
|
||||
calls := 0
|
||||
f := sync.OnceFunc(func() { calls++ })
|
||||
allocs := testing.AllocsPerRun(10, f)
|
||||
if calls != 1 {
|
||||
t.Errorf("want calls==1, got %d", calls)
|
||||
}
|
||||
if allocs != 0 {
|
||||
t.Errorf("want 0 allocations per call, got %v", allocs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOnceValue(t *testing.T) {
|
||||
calls := 0
|
||||
f := sync.OnceValue(func() int {
|
||||
calls++
|
||||
return calls
|
||||
})
|
||||
allocs := testing.AllocsPerRun(10, func() { f() })
|
||||
value := f()
|
||||
if calls != 1 {
|
||||
t.Errorf("want calls==1, got %d", calls)
|
||||
}
|
||||
if value != 1 {
|
||||
t.Errorf("want value==1, got %d", value)
|
||||
}
|
||||
if allocs != 0 {
|
||||
t.Errorf("want 0 allocations per call, got %v", allocs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOnceValues(t *testing.T) {
|
||||
calls := 0
|
||||
f := sync.OnceValues(func() (int, int) {
|
||||
calls++
|
||||
return calls, calls + 1
|
||||
})
|
||||
allocs := testing.AllocsPerRun(10, func() { f() })
|
||||
v1, v2 := f()
|
||||
if calls != 1 {
|
||||
t.Errorf("want calls==1, got %d", calls)
|
||||
}
|
||||
if v1 != 1 || v2 != 2 {
|
||||
t.Errorf("want v1==1 and v2==2, got %d and %d", v1, v2)
|
||||
}
|
||||
if allocs != 0 {
|
||||
t.Errorf("want 0 allocations per call, got %v", allocs)
|
||||
}
|
||||
}
|
||||
|
||||
func testOncePanicX(t *testing.T, calls *int, f func()) {
|
||||
testOncePanicWith(t, calls, f, func(label string, p any) {
|
||||
if p != "x" {
|
||||
t.Fatalf("%s: want panic %v, got %v", label, "x", p)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func testOncePanicWith(t *testing.T, calls *int, f func(), check func(label string, p any)) {
|
||||
// Check that each call to f panics with the same value, but the
// underlying function is only called once.
|
||||
for _, label := range []string{"first time", "second time"} {
|
||||
var p any
|
||||
panicked := true
|
||||
func() {
|
||||
defer func() {
|
||||
p = recover()
|
||||
}()
|
||||
f()
|
||||
panicked = false
|
||||
}()
|
||||
if !panicked {
|
||||
t.Fatalf("%s: f did not panic", label)
|
||||
}
|
||||
check(label, p)
|
||||
}
|
||||
if *calls != 1 {
|
||||
t.Errorf("want calls==1, got %d", *calls)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOnceFuncPanic(t *testing.T) {
|
||||
calls := 0
|
||||
f := sync.OnceFunc(func() {
|
||||
calls++
|
||||
panic("x")
|
||||
})
|
||||
testOncePanicX(t, &calls, f)
|
||||
}
|
||||
|
||||
func TestOnceValuePanic(t *testing.T) {
|
||||
calls := 0
|
||||
f := sync.OnceValue(func() int {
|
||||
calls++
|
||||
panic("x")
|
||||
})
|
||||
testOncePanicX(t, &calls, func() { f() })
|
||||
}
|
||||
|
||||
func TestOnceValuesPanic(t *testing.T) {
|
||||
calls := 0
|
||||
f := sync.OnceValues(func() (int, int) {
|
||||
calls++
|
||||
panic("x")
|
||||
})
|
||||
testOncePanicX(t, &calls, func() { f() })
|
||||
}
|
||||
|
||||
func TestOnceFuncPanicNil(t *testing.T) {
|
||||
calls := 0
|
||||
f := sync.OnceFunc(func() {
|
||||
calls++
|
||||
panic(nil)
|
||||
})
|
||||
testOncePanicWith(t, &calls, f, func(label string, p any) {
|
||||
switch p.(type) {
|
||||
case nil, *runtime.PanicNilError:
|
||||
return
|
||||
}
|
||||
t.Fatalf("%s: want nil panic, got %v", label, p)
|
||||
})
|
||||
}
|
||||
|
||||
func TestOnceFuncGoexit(t *testing.T) {
|
||||
// If f calls Goexit, the results are unspecified. But check that f doesn't
|
||||
// get called twice.
|
||||
calls := 0
|
||||
f := sync.OnceFunc(func() {
|
||||
calls++
|
||||
runtime.Goexit()
|
||||
})
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 2; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer func() { recover() }()
|
||||
f()
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
if calls != 1 {
|
||||
t.Errorf("want calls==1, got %d", calls)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOnceFuncPanicTraceback(t *testing.T) {
|
||||
// Test that on the first invocation of a OnceFunc, the stack trace goes all
|
||||
// the way to the origin of the panic.
|
||||
f := sync.OnceFunc(onceFuncPanic)
|
||||
|
||||
defer func() {
|
||||
if p := recover(); p != "x" {
|
||||
t.Fatalf("want panic %v, got %v", "x", p)
|
||||
}
|
||||
stack := debug.Stack()
|
||||
want := "sync_test.onceFuncPanic"
|
||||
if !bytes.Contains(stack, []byte(want)) {
|
||||
t.Fatalf("want stack containing %v, got:\n%s", want, string(stack))
|
||||
}
|
||||
}()
|
||||
f()
|
||||
}
|
||||
|
||||
func onceFuncPanic() {
|
||||
panic("x")
|
||||
}
|
||||
|
||||
func TestOnceXGC(t *testing.T) {
|
||||
fns := map[string]func([]byte) func(){
|
||||
"OnceFunc": func(buf []byte) func() {
|
||||
return sync.OnceFunc(func() { buf[0] = 1 })
|
||||
},
|
||||
"OnceValue": func(buf []byte) func() {
|
||||
f := sync.OnceValue(func() any { buf[0] = 1; return nil })
|
||||
return func() { f() }
|
||||
},
|
||||
"OnceValues": func(buf []byte) func() {
|
||||
f := sync.OnceValues(func() (any, any) { buf[0] = 1; return nil, nil })
|
||||
return func() { f() }
|
||||
},
|
||||
}
|
||||
for n, fn := range fns {
|
||||
t.Run(n, func(t *testing.T) {
|
||||
buf := make([]byte, 1024)
|
||||
var gc atomic.Bool
|
||||
runtime.SetFinalizer(&buf[0], func(_ *byte) {
|
||||
gc.Store(true)
|
||||
})
|
||||
f := fn(buf)
|
||||
gcwaitfin()
|
||||
if gc.Load() != false {
|
||||
t.Fatal("wrapped function garbage collected too early")
|
||||
}
|
||||
f()
|
||||
gcwaitfin()
|
||||
if gc.Load() != true {
|
||||
// Even if f is still alive, the function passed to Once(Func|Value|Values)
|
||||
// is not kept alive after the first call to f.
|
||||
t.Fatal("wrapped function should be garbage collected, but still live")
|
||||
}
|
||||
f()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// gcwaitfin performs garbage collection and waits for all finalizers to run.
|
||||
func gcwaitfin() {
|
||||
runtime.GC()
|
||||
runtime_blockUntilEmptyFinalizerQueue(math.MaxInt64)
|
||||
}
|
||||
|
||||
//go:linkname runtime_blockUntilEmptyFinalizerQueue runtime.blockUntilEmptyFinalizerQueue
|
||||
func runtime_blockUntilEmptyFinalizerQueue(int64) bool
|
||||
|
||||
var (
|
||||
onceFunc = sync.OnceFunc(func() {})
|
||||
|
||||
onceFuncOnce sync.Once
|
||||
)
|
||||
|
||||
func doOnceFunc() {
|
||||
onceFuncOnce.Do(func() {})
|
||||
}
|
||||
|
||||
func BenchmarkOnceFunc(b *testing.B) {
|
||||
b.Run("v=Once", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
// The baseline is direct use of sync.Once.
|
||||
doOnceFunc()
|
||||
}
|
||||
})
|
||||
b.Run("v=Global", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
// As of 3/2023, the compiler doesn't recognize that onceFunc is
|
||||
// never mutated and is a closure that could be inlined.
|
||||
// Too bad, because this is how OnceFunc will usually be used.
|
||||
onceFunc()
|
||||
}
|
||||
})
|
||||
b.Run("v=Local", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
// As of 3/2023, the compiler *does* recognize this local binding as an
|
||||
// inlinable closure. This is the best case for OnceFunc, but probably
|
||||
// not typical usage.
|
||||
f := sync.OnceFunc(func() {})
|
||||
for i := 0; i < b.N; i++ {
|
||||
f()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
onceValue = sync.OnceValue(func() int { return 42 })
|
||||
|
||||
onceValueOnce sync.Once
|
||||
onceValueValue int
|
||||
)
|
||||
|
||||
func doOnceValue() int {
|
||||
onceValueOnce.Do(func() {
|
||||
onceValueValue = 42
|
||||
})
|
||||
return onceValueValue
|
||||
}
|
||||
|
||||
func BenchmarkOnceValue(b *testing.B) {
|
||||
// See BenchmarkOnceFunc
|
||||
b.Run("v=Once", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if want, got := 42, doOnceValue(); want != got {
|
||||
b.Fatalf("want %d, got %d", want, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
b.Run("v=Global", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if want, got := 42, onceValue(); want != got {
|
||||
b.Fatalf("want %d, got %d", want, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
b.Run("v=Local", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
onceValue := sync.OnceValue(func() int { return 42 })
|
||||
for i := 0; i < b.N; i++ {
|
||||
if want, got := 42, onceValue(); want != got {
|
||||
b.Fatalf("want %d, got %d", want, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
318
src/sync/pool.go
Normal file
@@ -0,0 +1,318 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"internal/race"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// A Pool is a set of temporary objects that may be individually saved and
|
||||
// retrieved.
|
||||
//
|
||||
// Any item stored in the Pool may be removed automatically at any time without
|
||||
// notification. If the Pool holds the only reference when this happens, the
|
||||
// item might be deallocated.
|
||||
//
|
||||
// A Pool is safe for use by multiple goroutines simultaneously.
|
||||
//
|
||||
// Pool's purpose is to cache allocated but unused items for later reuse,
|
||||
// relieving pressure on the garbage collector. That is, it makes it easy to
|
||||
// build efficient, thread-safe free lists. However, it is not suitable for all
|
||||
// free lists.
|
||||
//
|
||||
// An appropriate use of a Pool is to manage a group of temporary items
|
||||
// silently shared among and potentially reused by concurrent independent
|
||||
// clients of a package. Pool provides a way to amortize allocation overhead
|
||||
// across many clients.
|
||||
//
|
||||
// An example of good use of a Pool is in the fmt package, which maintains a
|
||||
// dynamically-sized store of temporary output buffers. The store scales under
|
||||
// load (when many goroutines are actively printing) and shrinks when
|
||||
// quiescent.
|
||||
//
|
||||
// On the other hand, a free list maintained as part of a short-lived object is
|
||||
// not a suitable use for a Pool, since the overhead does not amortize well in
|
||||
// that scenario. It is more efficient to have such objects implement their own
|
||||
// free list.
|
||||
//
|
||||
// A Pool must not be copied after first use.
|
||||
//
|
||||
// In the terminology of [the Go memory model], a call to Put(x) “synchronizes before”
|
||||
// a call to [Pool.Get] returning that same value x.
|
||||
// Similarly, a call to New returning x “synchronizes before”
|
||||
// a call to Get returning that same value x.
|
||||
//
|
||||
// [the Go memory model]: https://go.dev/ref/mem
|
||||
type Pool struct {
|
||||
noCopy noCopy
|
||||
|
||||
local unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
|
||||
localSize uintptr // size of the local array
|
||||
|
||||
victim unsafe.Pointer // local from previous cycle
|
||||
victimSize uintptr // size of victims array
|
||||
|
||||
// New optionally specifies a function to generate
|
||||
// a value when Get would otherwise return nil.
|
||||
// It may not be changed concurrently with calls to Get.
|
||||
New func() any
|
||||
}
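// Illustrative sketch (not from the Go source): the buffer-reuse pattern the
// doc comment above alludes to, written from a client's point of view. The
// bufPool and format names are assumptions invented for this example, and an
// import of the bytes package is assumed. Resetting the buffer after Get is a
// choice made here so a reused buffer never carries a previous user's data.
//
//	var bufPool = sync.Pool{
//		New: func() any { return new(bytes.Buffer) },
//	}
//
//	func format(name string) string {
//		b := bufPool.Get().(*bytes.Buffer)
//		b.Reset() // the buffer may hold data from a previous user
//		b.WriteString("hello, ")
//		b.WriteString(name)
//		s := b.String()
//		bufPool.Put(b) // hand the buffer back for reuse
//		return s
//	}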
|
||||
|
||||
// Local per-P Pool appendix.
|
||||
type poolLocalInternal struct {
|
||||
private any // Can be used only by the respective P.
|
||||
shared poolChain // Local P can pushHead/popHead; any P can popTail.
|
||||
}
|
||||
|
||||
type poolLocal struct {
|
||||
poolLocalInternal
|
||||
|
||||
// Prevents false sharing on widespread platforms with
// 128 mod (cache line size) = 0.
|
||||
pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
|
||||
}
|
||||
|
||||
// from runtime
|
||||
//
|
||||
//go:linkname runtime_randn runtime.randn
|
||||
func runtime_randn(n uint32) uint32
|
||||
|
||||
var poolRaceHash [128]uint64
|
||||
|
||||
// poolRaceAddr returns an address to use as the synchronization point
|
||||
// for race detector logic. We don't use the actual pointer stored in x
|
||||
// directly, for fear of conflicting with other synchronization on that address.
|
||||
// Instead, we hash the pointer to get an index into poolRaceHash.
|
||||
// See discussion on golang.org/cl/31589.
|
||||
func poolRaceAddr(x any) unsafe.Pointer {
|
||||
ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
|
||||
h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
|
||||
return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
|
||||
}
|
||||
|
||||
// Put adds x to the pool.
|
||||
func (p *Pool) Put(x any) {
|
||||
if x == nil {
|
||||
return
|
||||
}
|
||||
if race.Enabled {
|
||||
if runtime_randn(4) == 0 {
|
||||
// Randomly drop x on floor.
|
||||
return
|
||||
}
|
||||
race.ReleaseMerge(poolRaceAddr(x))
|
||||
race.Disable()
|
||||
}
|
||||
l, _ := p.pin()
|
||||
if l.private == nil {
|
||||
l.private = x
|
||||
} else {
|
||||
l.shared.pushHead(x)
|
||||
}
|
||||
runtime_procUnpin()
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
}
|
||||
}
|
||||
|
||||
// Get selects an arbitrary item from the [Pool], removes it from the
|
||||
// Pool, and returns it to the caller.
|
||||
// Get may choose to ignore the pool and treat it as empty.
|
||||
// Callers should not assume any relation between values passed to [Pool.Put] and
|
||||
// the values returned by Get.
|
||||
//
|
||||
// If Get would otherwise return nil and p.New is non-nil, Get returns
|
||||
// the result of calling p.New.
|
||||
func (p *Pool) Get() any {
|
||||
if race.Enabled {
|
||||
race.Disable()
|
||||
}
|
||||
l, pid := p.pin()
|
||||
x := l.private
|
||||
l.private = nil
|
||||
if x == nil {
|
||||
// Try to pop the head of the local shard. We prefer
|
||||
// the head over the tail for temporal locality of
|
||||
// reuse.
|
||||
x, _ = l.shared.popHead()
|
||||
if x == nil {
|
||||
x = p.getSlow(pid)
|
||||
}
|
||||
}
|
||||
runtime_procUnpin()
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
if x != nil {
|
||||
race.Acquire(poolRaceAddr(x))
|
||||
}
|
||||
}
|
||||
if x == nil && p.New != nil {
|
||||
x = p.New()
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
func (p *Pool) getSlow(pid int) any {
|
||||
// See the comment in pin regarding ordering of the loads.
|
||||
size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
|
||||
locals := p.local // load-consume
|
||||
// Try to steal one element from other procs.
|
||||
for i := 0; i < int(size); i++ {
|
||||
l := indexLocal(locals, (pid+i+1)%int(size))
|
||||
if x, _ := l.shared.popTail(); x != nil {
|
||||
return x
|
||||
}
|
||||
}
|
||||
|
||||
// Try the victim cache. We do this after attempting to steal
|
||||
// from all primary caches because we want objects in the
|
||||
// victim cache to age out if at all possible.
|
||||
size = atomic.LoadUintptr(&p.victimSize)
|
||||
if uintptr(pid) >= size {
|
||||
return nil
|
||||
}
|
||||
locals = p.victim
|
||||
l := indexLocal(locals, pid)
|
||||
if x := l.private; x != nil {
|
||||
l.private = nil
|
||||
return x
|
||||
}
|
||||
for i := 0; i < int(size); i++ {
|
||||
l := indexLocal(locals, (pid+i)%int(size))
|
||||
if x, _ := l.shared.popTail(); x != nil {
|
||||
return x
|
||||
}
|
||||
}
|
||||
|
||||
// Mark the victim cache as empty so future gets don't bother
// with it.
|
||||
atomic.StoreUintptr(&p.victimSize, 0)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// pin pins the current goroutine to P, disables preemption and
|
||||
// returns poolLocal pool for the P and the P's id.
|
||||
// Caller must call runtime_procUnpin() when done with the pool.
|
||||
func (p *Pool) pin() (*poolLocal, int) {
|
||||
// Check whether p is nil to get a panic.
|
||||
// Otherwise the nil dereference happens while the m is pinned,
|
||||
// causing a fatal error rather than a panic.
|
||||
if p == nil {
|
||||
panic("nil Pool")
|
||||
}
|
||||
|
||||
pid := runtime_procPin()
|
||||
// In pinSlow we store to local and then to localSize, here we load in opposite order.
|
||||
// Since we've disabled preemption, GC cannot happen in between.
|
||||
// Thus here we must observe local at least as large as localSize.
// We can observe a newer/larger local; that is fine (we must observe its zero-initialized-ness).
|
||||
s := runtime_LoadAcquintptr(&p.localSize) // load-acquire
|
||||
l := p.local // load-consume
|
||||
if uintptr(pid) < s {
|
||||
return indexLocal(l, pid), pid
|
||||
}
|
||||
return p.pinSlow()
|
||||
}
|
||||
|
||||
func (p *Pool) pinSlow() (*poolLocal, int) {
|
||||
// Retry under the mutex.
|
||||
// Can not lock the mutex while pinned.
|
||||
runtime_procUnpin()
|
||||
allPoolsMu.Lock()
|
||||
defer allPoolsMu.Unlock()
|
||||
pid := runtime_procPin()
|
||||
// poolCleanup won't be called while we are pinned.
|
||||
s := p.localSize
|
||||
l := p.local
|
||||
if uintptr(pid) < s {
|
||||
return indexLocal(l, pid), pid
|
||||
}
|
||||
if p.local == nil {
|
||||
allPools = append(allPools, p)
|
||||
}
|
||||
// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
|
||||
size := runtime.GOMAXPROCS(0)
|
||||
local := make([]poolLocal, size)
|
||||
atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
|
||||
runtime_StoreReluintptr(&p.localSize, uintptr(size)) // store-release
|
||||
return &local[pid], pid
|
||||
}
|
||||
|
||||
// poolCleanup should be an internal detail,
|
||||
// but widely used packages access it using linkname.
|
||||
// Notable members of the hall of shame include:
|
||||
// - github.com/bytedance/gopkg
|
||||
// - github.com/songzhibin97/gkit
|
||||
//
|
||||
// Do not remove or change the type signature.
|
||||
// See go.dev/issue/67401.
|
||||
//
|
||||
//go:linkname poolCleanup
|
||||
func poolCleanup() {
|
||||
// This function is called with the world stopped, at the beginning of a garbage collection.
|
||||
// It must not allocate and probably should not call any runtime functions.
|
||||
|
||||
// Because the world is stopped, no pool user can be in a
|
||||
// pinned section (in effect, this has all Ps pinned).
|
||||
|
||||
// Drop victim caches from all pools.
|
||||
for _, p := range oldPools {
|
||||
p.victim = nil
|
||||
p.victimSize = 0
|
||||
}
|
||||
|
||||
// Move primary cache to victim cache.
|
||||
for _, p := range allPools {
|
||||
p.victim = p.local
|
||||
p.victimSize = p.localSize
|
||||
p.local = nil
|
||||
p.localSize = 0
|
||||
}
|
||||
|
||||
// The pools with non-empty primary caches now have non-empty
|
||||
// victim caches and no pools have primary caches.
|
||||
oldPools, allPools = allPools, nil
|
||||
}
|
||||
|
||||
var (
|
||||
allPoolsMu Mutex
|
||||
|
||||
// allPools is the set of pools that have non-empty primary
|
||||
// caches. Protected by either 1) allPoolsMu and pinning or 2)
|
||||
// STW.
|
||||
allPools []*Pool
|
||||
|
||||
// oldPools is the set of pools that may have non-empty victim
|
||||
// caches. Protected by STW.
|
||||
oldPools []*Pool
|
||||
)
|
||||
|
||||
func init() {
|
||||
runtime_registerPoolCleanup(poolCleanup)
|
||||
}
|
||||
|
||||
func indexLocal(l unsafe.Pointer, i int) *poolLocal {
|
||||
lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
|
||||
return (*poolLocal)(lp)
|
||||
}
|
||||
|
||||
// Implemented in runtime.
|
||||
func runtime_registerPoolCleanup(cleanup func())
|
||||
func runtime_procPin() int
|
||||
func runtime_procUnpin()
|
||||
|
||||
// The below are implemented in internal/runtime/atomic and the
|
||||
// compiler also knows to intrinsify the symbol we linkname into this
|
||||
// package.
|
||||
|
||||
//go:linkname runtime_LoadAcquintptr internal/runtime/atomic.LoadAcquintptr
|
||||
func runtime_LoadAcquintptr(ptr *uintptr) uintptr
|
||||
|
||||
//go:linkname runtime_StoreReluintptr internal/runtime/atomic.StoreReluintptr
|
||||
func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr
|
||||
395
src/sync/pool_test.go
Normal file
@@ -0,0 +1,395 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Pool is no-op under race detector, so all these tests do not work.
|
||||
//
|
||||
//go:build !race
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
. "sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestPool(t *testing.T) {
|
||||
// disable GC so we can control when it happens.
|
||||
defer debug.SetGCPercent(debug.SetGCPercent(-1))
|
||||
var p Pool
|
||||
if p.Get() != nil {
|
||||
t.Fatal("expected empty")
|
||||
}
|
||||
|
||||
// Make sure that the goroutine doesn't migrate to another P
|
||||
// between Put and Get calls.
|
||||
Runtime_procPin()
|
||||
p.Put("a")
|
||||
p.Put("b")
|
||||
if g := p.Get(); g != "a" {
|
||||
t.Fatalf("got %#v; want a", g)
|
||||
}
|
||||
if g := p.Get(); g != "b" {
|
||||
t.Fatalf("got %#v; want b", g)
|
||||
}
|
||||
if g := p.Get(); g != nil {
|
||||
t.Fatalf("got %#v; want nil", g)
|
||||
}
|
||||
Runtime_procUnpin()
|
||||
|
||||
// Put in a large number of objects so they spill into
|
||||
// stealable space.
|
||||
for i := 0; i < 100; i++ {
|
||||
p.Put("c")
|
||||
}
|
||||
// After one GC, the victim cache should keep them alive.
|
||||
runtime.GC()
|
||||
if g := p.Get(); g != "c" {
|
||||
t.Fatalf("got %#v; want c after GC", g)
|
||||
}
|
||||
// A second GC should drop the victim cache.
|
||||
runtime.GC()
|
||||
if g := p.Get(); g != nil {
|
||||
t.Fatalf("got %#v; want nil after second GC", g)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPoolNew(t *testing.T) {
|
||||
// disable GC so we can control when it happens.
|
||||
defer debug.SetGCPercent(debug.SetGCPercent(-1))
|
||||
|
||||
i := 0
|
||||
p := Pool{
|
||||
New: func() any {
|
||||
i++
|
||||
return i
|
||||
},
|
||||
}
|
||||
if v := p.Get(); v != 1 {
|
||||
t.Fatalf("got %v; want 1", v)
|
||||
}
|
||||
if v := p.Get(); v != 2 {
|
||||
t.Fatalf("got %v; want 2", v)
|
||||
}
|
||||
|
||||
// Make sure that the goroutine doesn't migrate to another P
|
||||
// between Put and Get calls.
|
||||
Runtime_procPin()
|
||||
p.Put(42)
|
||||
if v := p.Get(); v != 42 {
|
||||
t.Fatalf("got %v; want 42", v)
|
||||
}
|
||||
Runtime_procUnpin()
|
||||
|
||||
if v := p.Get(); v != 3 {
|
||||
t.Fatalf("got %v; want 3", v)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that Pool does not hold pointers to previously cached resources.
|
||||
func TestPoolGC(t *testing.T) {
|
||||
testPool(t, true)
|
||||
}
|
||||
|
||||
// Test that Pool releases resources on GC.
|
||||
func TestPoolRelease(t *testing.T) {
|
||||
testPool(t, false)
|
||||
}
|
||||
|
||||
func testPool(t *testing.T, drain bool) {
|
||||
var p Pool
|
||||
const N = 100
|
||||
loop:
|
||||
for try := 0; try < 3; try++ {
|
||||
if try == 1 && testing.Short() {
|
||||
break
|
||||
}
|
||||
var fin, fin1 uint32
|
||||
for i := 0; i < N; i++ {
|
||||
v := new(string)
|
||||
runtime.SetFinalizer(v, func(vv *string) {
|
||||
atomic.AddUint32(&fin, 1)
|
||||
})
|
||||
p.Put(v)
|
||||
}
|
||||
if drain {
|
||||
for i := 0; i < N; i++ {
|
||||
p.Get()
|
||||
}
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
runtime.GC()
|
||||
time.Sleep(time.Duration(i*100+10) * time.Millisecond)
|
||||
// 1 pointer can remain on stack or elsewhere
|
||||
if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPoolStress(t *testing.T) {
|
||||
const P = 10
|
||||
N := int(1e6)
|
||||
if testing.Short() {
|
||||
N /= 100
|
||||
}
|
||||
var p Pool
|
||||
done := make(chan bool)
|
||||
for i := 0; i < P; i++ {
|
||||
go func() {
|
||||
var v any = 0
|
||||
for j := 0; j < N; j++ {
|
||||
if v == nil {
|
||||
v = 0
|
||||
}
|
||||
p.Put(v)
|
||||
v = p.Get()
|
||||
if v != nil && v.(int) != 0 {
|
||||
t.Errorf("expect 0, got %v", v)
|
||||
break
|
||||
}
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
}
|
||||
for i := 0; i < P; i++ {
|
||||
<-done
|
||||
}
|
||||
}
|
||||
|
||||
func TestPoolDequeue(t *testing.T) {
|
||||
testPoolDequeue(t, NewPoolDequeue(16))
|
||||
}
|
||||
|
||||
func TestPoolChain(t *testing.T) {
|
||||
testPoolDequeue(t, NewPoolChain())
|
||||
}
|
||||
|
||||
func testPoolDequeue(t *testing.T, d PoolDequeue) {
|
||||
const P = 10
|
||||
var N int = 2e6
|
||||
if testing.Short() {
|
||||
N = 1e3
|
||||
}
|
||||
have := make([]int32, N)
|
||||
var stop int32
|
||||
var wg WaitGroup
|
||||
record := func(val int) {
|
||||
atomic.AddInt32(&have[val], 1)
|
||||
if val == N-1 {
|
||||
atomic.StoreInt32(&stop, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// Start P-1 consumers.
|
||||
for i := 1; i < P; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
fail := 0
|
||||
for atomic.LoadInt32(&stop) == 0 {
|
||||
val, ok := d.PopTail()
|
||||
if ok {
|
||||
fail = 0
|
||||
record(val.(int))
|
||||
} else {
|
||||
// Speed up the test by
|
||||
// allowing the pusher to run.
|
||||
if fail++; fail%100 == 0 {
|
||||
runtime.Gosched()
|
||||
}
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
// Start 1 producer.
|
||||
nPopHead := 0
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
for j := 0; j < N; j++ {
|
||||
for !d.PushHead(j) {
|
||||
// Allow a popper to run.
|
||||
runtime.Gosched()
|
||||
}
|
||||
if j%10 == 0 {
|
||||
val, ok := d.PopHead()
|
||||
if ok {
|
||||
nPopHead++
|
||||
record(val.(int))
|
||||
}
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
|
||||
// Check results.
|
||||
for i, count := range have {
|
||||
if count != 1 {
|
||||
t.Errorf("expected have[%d] = 1, got %d", i, count)
|
||||
}
|
||||
}
|
||||
// Check that at least some PopHeads succeeded. We skip this
|
||||
// check in short mode because it's common enough that the
|
||||
// queue will stay nearly empty all the time and a PopTail
|
||||
// will happen during the window between every PushHead and
|
||||
// PopHead.
|
||||
if !testing.Short() && nPopHead == 0 {
|
||||
t.Errorf("popHead never succeeded")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNilPool(t *testing.T) {
|
||||
catch := func() {
|
||||
if recover() == nil {
|
||||
t.Error("expected panic")
|
||||
}
|
||||
}
|
||||
|
||||
var p *Pool
|
||||
t.Run("Get", func(t *testing.T) {
|
||||
defer catch()
|
||||
if p.Get() != nil {
|
||||
t.Error("expected empty")
|
||||
}
|
||||
t.Error("should have panicked already")
|
||||
})
|
||||
t.Run("Put", func(t *testing.T) {
|
||||
defer catch()
|
||||
p.Put("a")
|
||||
t.Error("should have panicked already")
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkPool(b *testing.B) {
|
||||
var p Pool
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
p.Put(1)
|
||||
p.Get()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkPoolOverflow(b *testing.B) {
|
||||
var p Pool
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
for b := 0; b < 100; b++ {
|
||||
p.Put(1)
|
||||
}
|
||||
for b := 0; b < 100; b++ {
|
||||
p.Get()
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Simulate object starvation in order to force Ps to steal objects
|
||||
// from other Ps.
|
||||
func BenchmarkPoolStarvation(b *testing.B) {
|
||||
var p Pool
|
||||
count := 100
|
||||
// Reduce the number of Put objects by 33%. This creates object starvation,
// which forces P-local storage to steal objects from other Ps.
|
||||
countStarved := count - int(float32(count)*0.33)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
for b := 0; b < countStarved; b++ {
|
||||
p.Put(1)
|
||||
}
|
||||
for b := 0; b < count; b++ {
|
||||
p.Get()
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var globalSink any
|
||||
|
||||
func BenchmarkPoolSTW(b *testing.B) {
|
||||
// Take control of GC.
|
||||
defer debug.SetGCPercent(debug.SetGCPercent(-1))
|
||||
|
||||
var mstats runtime.MemStats
|
||||
var pauses []uint64
|
||||
|
||||
var p Pool
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Put a large number of items into a pool.
|
||||
const N = 100000
|
||||
var item any = 42
|
||||
for i := 0; i < N; i++ {
|
||||
p.Put(item)
|
||||
}
|
||||
// Do a GC.
|
||||
runtime.GC()
|
||||
// Record pause time.
|
||||
runtime.ReadMemStats(&mstats)
|
||||
pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
|
||||
}
|
||||
|
||||
// Get pause time stats.
|
||||
slices.Sort(pauses)
|
||||
var total uint64
|
||||
for _, ns := range pauses {
|
||||
total += ns
|
||||
}
|
||||
// ns/op for this benchmark is average STW time.
|
||||
b.ReportMetric(float64(total)/float64(b.N), "ns/op")
|
||||
b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
|
||||
b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
|
||||
}
|
||||
|
||||
func BenchmarkPoolExpensiveNew(b *testing.B) {
|
||||
// Populate a pool with items that are expensive to construct
|
||||
// to stress pool cleanup and subsequent reconstruction.
|
||||
|
||||
// Create a ballast so the GC has a non-zero heap size and
|
||||
// runs at reasonable times.
|
||||
globalSink = make([]byte, 8<<20)
|
||||
defer func() { globalSink = nil }()
|
||||
|
||||
// Create a pool that's "expensive" to fill.
|
||||
var p Pool
|
||||
var nNew uint64
|
||||
p.New = func() any {
|
||||
atomic.AddUint64(&nNew, 1)
|
||||
time.Sleep(time.Millisecond)
|
||||
return 42
|
||||
}
|
||||
var mstats1, mstats2 runtime.MemStats
|
||||
runtime.ReadMemStats(&mstats1)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
// Simulate 100X the number of goroutines having items
|
||||
// checked out from the Pool simultaneously.
|
||||
items := make([]any, 100)
|
||||
var sink []byte
|
||||
for pb.Next() {
|
||||
// Stress the pool.
|
||||
for i := range items {
|
||||
items[i] = p.Get()
|
||||
// Simulate doing some work with this
|
||||
// item checked out.
|
||||
sink = make([]byte, 32<<10)
|
||||
}
|
||||
for i, v := range items {
|
||||
p.Put(v)
|
||||
items[i] = nil
|
||||
}
|
||||
}
|
||||
_ = sink
|
||||
})
|
||||
runtime.ReadMemStats(&mstats2)
|
||||
|
||||
b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
|
||||
b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
|
||||
}
|
||||
302
src/sync/poolqueue.go
Normal file
302
src/sync/poolqueue.go
Normal file
@@ -0,0 +1,302 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// poolDequeue is a lock-free fixed-size single-producer,
|
||||
// multi-consumer queue. The single producer can both push and pop
|
||||
// from the head, and consumers can pop from the tail.
|
||||
//
|
||||
// It has the added feature that it nils out unused slots to avoid
|
||||
// unnecessary retention of objects. This is important for sync.Pool,
|
||||
// but not typically a property considered in the literature.
|
||||
type poolDequeue struct {
|
||||
// headTail packs together a 32-bit head index and a 32-bit
|
||||
// tail index. Both are indexes into vals, taken modulo len(vals).
|
||||
//
|
||||
// tail = index of oldest data in queue
|
||||
// head = index of next slot to fill
|
||||
//
|
||||
// Slots in the range [tail, head) are owned by consumers.
|
||||
// A consumer continues to own a slot outside this range until
|
||||
// it nils the slot, at which point ownership passes to the
|
||||
// producer.
|
||||
//
|
||||
// The head index is stored in the most-significant bits so
|
||||
// that we can atomically add to it and the overflow is
|
||||
// harmless.
|
||||
headTail atomic.Uint64
|
||||
|
||||
// vals is a ring buffer of interface{} values stored in this
|
||||
// dequeue. The size of this must be a power of 2.
|
||||
//
|
||||
// vals[i].typ is nil if the slot is empty and non-nil
|
||||
// otherwise. A slot is still in use until *both* the tail
|
||||
// index has moved beyond it and typ has been set to nil. This
|
||||
// is set to nil atomically by the consumer and read
|
||||
// atomically by the producer.
|
||||
vals []eface
|
||||
}
|
||||
|
||||
type eface struct {
|
||||
typ, val unsafe.Pointer
|
||||
}
|
||||
|
||||
const dequeueBits = 32
|
||||
|
||||
// dequeueLimit is the maximum size of a poolDequeue.
|
||||
//
|
||||
// This must be at most (1<<dequeueBits)/2 because detecting fullness
|
||||
// depends on wrapping around the ring buffer without wrapping around
|
||||
// the index. We divide by 4 so this fits in an int on 32-bit.
|
||||
const dequeueLimit = (1 << dequeueBits) / 4
|
||||
|
||||
// dequeueNil is used in poolDequeue to represent interface{}(nil).
|
||||
// Since we use nil to represent empty slots, we need a sentinel value
|
||||
// to represent nil.
|
||||
type dequeueNil *struct{}
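// Editorial sketch (not part of the original file): a nil value handed to
// the dequeue is stored as the dequeueNil sentinel so that a nil slot can
// keep meaning "empty", and it is converted back to nil on the way out
// (see popHead and popTail below). The function name is an assumption made
// for illustration only.
func dequeueNilExample(val any) any {
	if val == nil {
		val = dequeueNil(nil) // what pushHead stores
	}
	if val == dequeueNil(nil) {
		val = nil // what popHead/popTail return
	}
	return val
}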
|
||||
|
||||
func (d *poolDequeue) unpack(ptrs uint64) (head, tail uint32) {
|
||||
const mask = 1<<dequeueBits - 1
|
||||
head = uint32((ptrs >> dequeueBits) & mask)
|
||||
tail = uint32(ptrs & mask)
|
||||
return
|
||||
}
|
||||
|
||||
func (d *poolDequeue) pack(head, tail uint32) uint64 {
|
||||
const mask = 1<<dequeueBits - 1
|
||||
return (uint64(head) << dequeueBits) |
|
||||
uint64(tail&mask)
|
||||
}
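// Editorial sketch (not part of the original file): pack and unpack are
// inverses, with head in the high 32 bits and tail in the low 32 bits of
// the combined word. The function name packExample is an assumption made
// for illustration only.
func packExample() (head, tail uint32) {
	var d poolDequeue
	ptrs := d.pack(5, 3)  // 5<<dequeueBits | 3
	return d.unpack(ptrs) // (5, 3)
}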
|
||||
|
||||
// pushHead adds val at the head of the queue. It returns false if the
|
||||
// queue is full. It must only be called by a single producer.
|
||||
func (d *poolDequeue) pushHead(val any) bool {
|
||||
ptrs := d.headTail.Load()
|
||||
head, tail := d.unpack(ptrs)
|
||||
if (tail+uint32(len(d.vals)))&(1<<dequeueBits-1) == head {
|
||||
// Queue is full.
|
||||
return false
|
||||
}
|
||||
slot := &d.vals[head&uint32(len(d.vals)-1)]
|
||||
|
||||
// Check if the head slot has been released by popTail.
|
||||
typ := atomic.LoadPointer(&slot.typ)
|
||||
if typ != nil {
|
||||
// Another goroutine is still cleaning up the tail, so
|
||||
// the queue is actually still full.
|
||||
return false
|
||||
}
|
||||
|
||||
// The head slot is free, so we own it.
|
||||
if val == nil {
|
||||
val = dequeueNil(nil)
|
||||
}
|
||||
*(*any)(unsafe.Pointer(slot)) = val
|
||||
|
||||
// Increment head. This passes ownership of slot to popTail
|
||||
// and acts as a store barrier for writing the slot.
|
||||
d.headTail.Add(1 << dequeueBits)
|
||||
return true
|
||||
}
|
||||
|
||||
// popHead removes and returns the element at the head of the queue.
|
||||
// It returns false if the queue is empty. It must only be called by a
|
||||
// single producer.
|
||||
func (d *poolDequeue) popHead() (any, bool) {
|
||||
var slot *eface
|
||||
for {
|
||||
ptrs := d.headTail.Load()
|
||||
head, tail := d.unpack(ptrs)
|
||||
if tail == head {
|
||||
// Queue is empty.
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Confirm tail and decrement head. We do this before
|
||||
// reading the value to take back ownership of this
|
||||
// slot.
|
||||
head--
|
||||
ptrs2 := d.pack(head, tail)
|
||||
if d.headTail.CompareAndSwap(ptrs, ptrs2) {
|
||||
// We successfully took back slot.
|
||||
slot = &d.vals[head&uint32(len(d.vals)-1)]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
val := *(*any)(unsafe.Pointer(slot))
|
||||
if val == dequeueNil(nil) {
|
||||
val = nil
|
||||
}
|
||||
// Zero the slot. Unlike popTail, this isn't racing with
|
||||
// pushHead, so we don't need to be careful here.
|
||||
*slot = eface{}
|
||||
return val, true
|
||||
}
|
||||
|
||||
// popTail removes and returns the element at the tail of the queue.
|
||||
// It returns false if the queue is empty. It may be called by any
|
||||
// number of consumers.
|
||||
func (d *poolDequeue) popTail() (any, bool) {
|
||||
var slot *eface
|
||||
for {
|
||||
ptrs := d.headTail.Load()
|
||||
head, tail := d.unpack(ptrs)
|
||||
if tail == head {
|
||||
// Queue is empty.
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Confirm head and tail (for our speculative check
|
||||
// above) and increment tail. If this succeeds, then
|
||||
// we own the slot at tail.
|
||||
ptrs2 := d.pack(head, tail+1)
|
||||
if d.headTail.CompareAndSwap(ptrs, ptrs2) {
|
||||
// Success.
|
||||
slot = &d.vals[tail&uint32(len(d.vals)-1)]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// We now own slot.
|
||||
val := *(*any)(unsafe.Pointer(slot))
|
||||
if val == dequeueNil(nil) {
|
||||
val = nil
|
||||
}
|
||||
|
||||
// Tell pushHead that we're done with this slot. Zeroing the
|
||||
// slot is also important so we don't leave behind references
|
||||
// that could keep this object live longer than necessary.
|
||||
//
|
||||
// We write to val first and then publish that we're done with
|
||||
// this slot by atomically writing to typ.
|
||||
slot.val = nil
|
||||
atomic.StorePointer(&slot.typ, nil)
|
||||
// At this point pushHead owns the slot.
|
||||
|
||||
return val, true
|
||||
}
|
||||
|
||||
// poolChain is a dynamically-sized version of poolDequeue.
|
||||
//
|
||||
// This is implemented as a doubly-linked list queue of poolDequeues
|
||||
// where each dequeue is double the size of the previous one. Once a
|
||||
// dequeue fills up, this allocates a new one and only ever pushes to
|
||||
// the latest dequeue. Pops happen from the other end of the list and
|
||||
// once a dequeue is exhausted, it gets removed from the list.
|
||||
type poolChain struct {
|
||||
// head is the poolDequeue to push to. This is only accessed
|
||||
// by the producer, so doesn't need to be synchronized.
|
||||
head *poolChainElt
|
||||
|
||||
// tail is the poolDequeue to popTail from. This is accessed
|
||||
// by consumers, so reads and writes must be atomic.
|
||||
tail atomic.Pointer[poolChainElt]
|
||||
}
|
||||
|
||||
type poolChainElt struct {
|
||||
poolDequeue
|
||||
|
||||
// next and prev link to the adjacent poolChainElts in this
|
||||
// poolChain.
|
||||
//
|
||||
// next is written atomically by the producer and read
|
||||
// atomically by the consumer. It only transitions from nil to
|
||||
// non-nil.
|
||||
//
|
||||
// prev is written atomically by the consumer and read
|
||||
// atomically by the producer. It only transitions from
|
||||
// non-nil to nil.
|
||||
next, prev atomic.Pointer[poolChainElt]
|
||||
}
|
||||
|
||||
func (c *poolChain) pushHead(val any) {
|
||||
d := c.head
|
||||
if d == nil {
|
||||
// Initialize the chain.
|
||||
const initSize = 8 // Must be a power of 2
|
||||
d = new(poolChainElt)
|
||||
d.vals = make([]eface, initSize)
|
||||
c.head = d
|
||||
c.tail.Store(d)
|
||||
}
|
||||
|
||||
if d.pushHead(val) {
|
||||
return
|
||||
}
|
||||
|
||||
// The current dequeue is full. Allocate a new one of twice
|
||||
// the size.
|
||||
newSize := len(d.vals) * 2
|
||||
if newSize >= dequeueLimit {
|
||||
// Can't make it any bigger.
|
||||
newSize = dequeueLimit
|
||||
}
|
||||
|
||||
d2 := &poolChainElt{}
|
||||
d2.prev.Store(d)
|
||||
d2.vals = make([]eface, newSize)
|
||||
c.head = d2
|
||||
d.next.Store(d2)
|
||||
d2.pushHead(val)
|
||||
}
|
||||
|
||||
func (c *poolChain) popHead() (any, bool) {
|
||||
d := c.head
|
||||
for d != nil {
|
||||
if val, ok := d.popHead(); ok {
|
||||
return val, ok
|
||||
}
|
||||
// There may still be unconsumed elements in the
|
||||
// previous dequeue, so try backing up.
|
||||
d = d.prev.Load()
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (c *poolChain) popTail() (any, bool) {
|
||||
d := c.tail.Load()
|
||||
if d == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
for {
|
||||
// It's important that we load the next pointer
|
||||
// *before* popping the tail. In general, d may be
|
||||
// transiently empty, but if next is non-nil before
|
||||
// the pop and the pop fails, then d is permanently
|
||||
// empty, which is the only condition under which it's
|
||||
// safe to drop d from the chain.
|
||||
d2 := d.next.Load()
|
||||
|
||||
if val, ok := d.popTail(); ok {
|
||||
return val, ok
|
||||
}
|
||||
|
||||
if d2 == nil {
|
||||
// This is the only dequeue. It's empty right
|
||||
// now, but could be pushed to in the future.
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// The tail of the chain has been drained, so move on
|
||||
// to the next dequeue. Try to drop it from the chain
|
||||
// so the next pop doesn't have to look at the empty
|
||||
// dequeue again.
|
||||
if c.tail.CompareAndSwap(d, d2) {
|
||||
// We won the race. Clear the prev pointer so
|
||||
// the garbage collector can collect the empty
|
||||
// dequeue and so popHead doesn't back up
|
||||
// further than necessary.
|
||||
d2.prev.Store(nil)
|
||||
}
|
||||
d = d2
|
||||
}
|
||||
}
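// Editorial sketch (not part of the original file) of the usage pattern
// described above: the owning P pushes and pops at the head of its own
// chain, while other Ps steal from the tail. The function name is an
// assumption made for illustration only.
func poolChainExample() {
	var c poolChain
	c.pushHead("x") // producer side: single P only
	if v, ok := c.popTail(); ok {
		_ = v // consumer side: any P may steal from the tail
	}
	if v, ok := c.popHead(); ok {
		_ = v // the producer may also take back its own head
	}
}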
|
||||
63
src/sync/runtime.go
Normal file
63
src/sync/runtime.go
Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// defined in package runtime
|
||||
|
||||
// Semacquire waits until *s > 0 and then atomically decrements it.
|
||||
// It is intended as a simple sleep primitive for use by the synchronization
|
||||
// library and should not be used directly.
|
||||
func runtime_Semacquire(s *uint32)
|
||||
|
||||
// Semacquire(RW)Mutex(R) is like Semacquire, but for profiling contended
|
||||
// Mutexes and RWMutexes.
|
||||
// If lifo is true, queue the waiter at the head of the wait queue.
|
||||
// skipframes is the number of frames to omit during tracing, counting from
|
||||
// runtime_SemacquireMutex's caller.
|
||||
// The different forms of this function just tell the runtime how to present
|
||||
// the reason for waiting in a backtrace, and are used to compute some metrics.
|
||||
// Otherwise they're functionally identical.
|
||||
func runtime_SemacquireMutex(s *uint32, lifo bool, skipframes int)
|
||||
func runtime_SemacquireRWMutexR(s *uint32, lifo bool, skipframes int)
|
||||
func runtime_SemacquireRWMutex(s *uint32, lifo bool, skipframes int)
|
||||
|
||||
// Semrelease atomically increments *s and notifies a waiting goroutine
|
||||
// if one is blocked in Semacquire.
|
||||
// It is intended as a simple wakeup primitive for use by the synchronization
|
||||
// library and should not be used directly.
|
||||
// If handoff is true, pass count directly to the first waiter.
|
||||
// skipframes is the number of frames to omit during tracing, counting from
|
||||
// runtime_Semrelease's caller.
|
||||
func runtime_Semrelease(s *uint32, handoff bool, skipframes int)
|
||||
|
||||
// See runtime/sema.go for documentation.
|
||||
func runtime_notifyListAdd(l *notifyList) uint32
|
||||
|
||||
// See runtime/sema.go for documentation.
|
||||
func runtime_notifyListWait(l *notifyList, t uint32)
|
||||
|
||||
// See runtime/sema.go for documentation.
|
||||
func runtime_notifyListNotifyAll(l *notifyList)
|
||||
|
||||
// See runtime/sema.go for documentation.
|
||||
func runtime_notifyListNotifyOne(l *notifyList)
|
||||
|
||||
// Ensure that sync and runtime agree on size of notifyList.
|
||||
func runtime_notifyListCheck(size uintptr)
|
||||
func init() {
|
||||
var n notifyList
|
||||
runtime_notifyListCheck(unsafe.Sizeof(n))
|
||||
}
|
||||
|
||||
// Active spinning runtime support.
|
||||
// runtime_canSpin reports whether spinning makes sense at the moment.
|
||||
func runtime_canSpin(i int) bool
|
||||
|
||||
// runtime_doSpin does active spinning.
|
||||
func runtime_doSpin()
|
||||
|
||||
func runtime_nanotime() int64
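// Editorial sketch (not part of the original file): the semaphore
// primitives pair up on a single uint32 address. A goroutine blocked in
// runtime_Semacquire is released by a matching runtime_Semrelease on the
// same address, which is how Mutex, RWMutex and WaitGroup park and wake
// goroutines. The function name is an assumption made for illustration only.
func semaExample() {
	var sema uint32
	go runtime_Semrelease(&sema, false, 0) // wake the waiter below
	runtime_Semacquire(&sema)              // blocks until *sema > 0, then decrements
}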
|
||||
19
src/sync/runtime2.go
Normal file
19
src/sync/runtime2.go
Normal file
@@ -0,0 +1,19 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !goexperiment.staticlockranking
|
||||
|
||||
package sync
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// Approximation of notifyList in runtime/sema.go. Size and alignment must
|
||||
// agree.
|
||||
type notifyList struct {
|
||||
wait uint32
|
||||
notify uint32
|
||||
lock uintptr // key field of the mutex
|
||||
head unsafe.Pointer
|
||||
tail unsafe.Pointer
|
||||
}
|
||||
22
src/sync/runtime2_lockrank.go
Normal file
22
src/sync/runtime2_lockrank.go
Normal file
@@ -0,0 +1,22 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build goexperiment.staticlockranking
|
||||
|
||||
package sync
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// Approximation of notifyList in runtime/sema.go. Size and alignment must
|
||||
// agree.
|
||||
type notifyList struct {
|
||||
wait uint32
|
||||
notify uint32
|
||||
rank int // rank field of the mutex
|
||||
pad int // pad field of the mutex
|
||||
lock uintptr // key field of the mutex
|
||||
|
||||
head unsafe.Pointer
|
||||
tail unsafe.Pointer
|
||||
}
|
||||
75
src/sync/runtime_sema_test.go
Normal file
75
src/sync/runtime_sema_test.go
Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
. "sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkSemaUncontended(b *testing.B) {
|
||||
type PaddedSem struct {
|
||||
sem uint32
|
||||
pad [32]uint32
|
||||
}
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
sem := new(PaddedSem)
|
||||
for pb.Next() {
|
||||
Runtime_Semrelease(&sem.sem, false, 0)
|
||||
Runtime_Semacquire(&sem.sem)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func benchmarkSema(b *testing.B, block, work bool) {
|
||||
if b.N == 0 {
|
||||
return
|
||||
}
|
||||
sem := uint32(0)
|
||||
if block {
|
||||
done := make(chan bool)
|
||||
go func() {
|
||||
for p := 0; p < runtime.GOMAXPROCS(0)/2; p++ {
|
||||
Runtime_Semacquire(&sem)
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
defer func() {
|
||||
<-done
|
||||
}()
|
||||
}
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
foo := 0
|
||||
for pb.Next() {
|
||||
Runtime_Semrelease(&sem, false, 0)
|
||||
if work {
|
||||
for i := 0; i < 100; i++ {
|
||||
foo *= 2
|
||||
foo /= 2
|
||||
}
|
||||
}
|
||||
Runtime_Semacquire(&sem)
|
||||
}
|
||||
_ = foo
|
||||
Runtime_Semrelease(&sem, false, 0)
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkSemaSyntNonblock(b *testing.B) {
|
||||
benchmarkSema(b, false, false)
|
||||
}
|
||||
|
||||
func BenchmarkSemaSyntBlock(b *testing.B) {
|
||||
benchmarkSema(b, true, false)
|
||||
}
|
||||
|
||||
func BenchmarkSemaWorkNonblock(b *testing.B) {
|
||||
benchmarkSema(b, false, true)
|
||||
}
|
||||
|
||||
func BenchmarkSemaWorkBlock(b *testing.B) {
|
||||
benchmarkSema(b, true, true)
|
||||
}
|
||||
245
src/sync/rwmutex.go
Normal file
245
src/sync/rwmutex.go
Normal file
@@ -0,0 +1,245 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"internal/race"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// There is a modified copy of this file in runtime/rwmutex.go.
|
||||
// If you make any changes here, see if you should make them there.
|
||||
|
||||
// A RWMutex is a reader/writer mutual exclusion lock.
|
||||
// The lock can be held by an arbitrary number of readers or a single writer.
|
||||
// The zero value for a RWMutex is an unlocked mutex.
|
||||
//
|
||||
// A RWMutex must not be copied after first use.
|
||||
//
|
||||
// If any goroutine calls [RWMutex.Lock] while the lock is already held by
|
||||
// one or more readers, concurrent calls to [RWMutex.RLock] will block until
|
||||
// the writer has acquired (and released) the lock, to ensure that
|
||||
// the lock eventually becomes available to the writer.
|
||||
// Note that this prohibits recursive read-locking.
|
||||
//
|
||||
// In the terminology of [the Go memory model],
|
||||
// the n'th call to [RWMutex.Unlock] “synchronizes before” the m'th call to Lock
|
||||
// for any n < m, just as for [Mutex].
|
||||
// For any call to RLock, there exists an n such that
|
||||
// the n'th call to Unlock “synchronizes before” that call to RLock,
|
||||
// and the corresponding call to [RWMutex.RUnlock] “synchronizes before”
|
||||
// the n+1'th call to Lock.
|
||||
//
|
||||
// [the Go memory model]: https://go.dev/ref/mem
|
||||
type RWMutex struct {
|
||||
w Mutex // held if there are pending writers
|
||||
writerSem uint32 // semaphore for writers to wait for completing readers
|
||||
readerSem uint32 // semaphore for readers to wait for completing writers
|
||||
readerCount atomic.Int32 // number of pending readers
|
||||
readerWait atomic.Int32 // number of departing readers
|
||||
}
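// Editorial sketch (not part of the original file) of the basic usage
// pattern documented above: any number of concurrent readers, or one
// exclusive writer. The names in the example are assumptions made for
// illustration only.
func rwMutexExample() {
	var mu RWMutex
	counters := map[string]int{}

	mu.Lock() // writer: exclusive access
	counters["hits"]++
	mu.Unlock()

	mu.RLock() // reader: shared access, may run concurrently with other readers
	_ = counters["hits"]
	mu.RUnlock()
}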
|
||||
|
||||
const rwmutexMaxReaders = 1 << 30
|
||||
|
||||
// Happens-before relationships are indicated to the race detector via:
|
||||
// - Unlock -> Lock: readerSem
|
||||
// - Unlock -> RLock: readerSem
|
||||
// - RUnlock -> Lock: writerSem
|
||||
//
|
||||
// The methods below temporarily disable handling of race synchronization
|
||||
// events in order to provide the more precise model above to the race
|
||||
// detector.
|
||||
//
|
||||
// For example, atomic.AddInt32 in RLock should not appear to provide
|
||||
// acquire-release semantics, which would incorrectly synchronize racing
|
||||
// readers, thus potentially missing races.
|
||||
|
||||
// RLock locks rw for reading.
|
||||
//
|
||||
// It should not be used for recursive read locking; a blocked Lock
|
||||
// call excludes new readers from acquiring the lock. See the
|
||||
// documentation on the [RWMutex] type.
|
||||
func (rw *RWMutex) RLock() {
|
||||
if race.Enabled {
|
||||
_ = rw.w.state
|
||||
race.Disable()
|
||||
}
|
||||
if rw.readerCount.Add(1) < 0 {
|
||||
// A writer is pending, wait for it.
|
||||
runtime_SemacquireRWMutexR(&rw.readerSem, false, 0)
|
||||
}
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
race.Acquire(unsafe.Pointer(&rw.readerSem))
|
||||
}
|
||||
}
|
||||
|
||||
// TryRLock tries to lock rw for reading and reports whether it succeeded.
|
||||
//
|
||||
// Note that while correct uses of TryRLock do exist, they are rare,
|
||||
// and use of TryRLock is often a sign of a deeper problem
|
||||
// in a particular use of mutexes.
|
||||
func (rw *RWMutex) TryRLock() bool {
|
||||
if race.Enabled {
|
||||
_ = rw.w.state
|
||||
race.Disable()
|
||||
}
|
||||
for {
|
||||
c := rw.readerCount.Load()
|
||||
if c < 0 {
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
}
|
||||
return false
|
||||
}
|
||||
if rw.readerCount.CompareAndSwap(c, c+1) {
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
race.Acquire(unsafe.Pointer(&rw.readerSem))
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RUnlock undoes a single [RWMutex.RLock] call;
|
||||
// it does not affect other simultaneous readers.
|
||||
// It is a run-time error if rw is not locked for reading
|
||||
// on entry to RUnlock.
|
||||
func (rw *RWMutex) RUnlock() {
|
||||
if race.Enabled {
|
||||
_ = rw.w.state
|
||||
race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
|
||||
race.Disable()
|
||||
}
|
||||
if r := rw.readerCount.Add(-1); r < 0 {
|
||||
// Outlined slow-path to allow the fast-path to be inlined
|
||||
rw.rUnlockSlow(r)
|
||||
}
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
}
|
||||
}
|
||||
|
||||
func (rw *RWMutex) rUnlockSlow(r int32) {
|
||||
if r+1 == 0 || r+1 == -rwmutexMaxReaders {
|
||||
race.Enable()
|
||||
fatal("sync: RUnlock of unlocked RWMutex")
|
||||
}
|
||||
// A writer is pending.
|
||||
if rw.readerWait.Add(-1) == 0 {
|
||||
// The last reader unblocks the writer.
|
||||
runtime_Semrelease(&rw.writerSem, false, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// Lock locks rw for writing.
|
||||
// If the lock is already locked for reading or writing,
|
||||
// Lock blocks until the lock is available.
|
||||
func (rw *RWMutex) Lock() {
|
||||
if race.Enabled {
|
||||
_ = rw.w.state
|
||||
race.Disable()
|
||||
}
|
||||
// First, resolve competition with other writers.
|
||||
rw.w.Lock()
|
||||
// Announce to readers there is a pending writer.
|
||||
r := rw.readerCount.Add(-rwmutexMaxReaders) + rwmutexMaxReaders
|
||||
// Wait for active readers.
|
||||
if r != 0 && rw.readerWait.Add(r) != 0 {
|
||||
runtime_SemacquireRWMutex(&rw.writerSem, false, 0)
|
||||
}
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
race.Acquire(unsafe.Pointer(&rw.readerSem))
|
||||
race.Acquire(unsafe.Pointer(&rw.writerSem))
|
||||
}
|
||||
}
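// Editorial sketch (not part of the original file) of the readerCount
// encoding used by Lock above: a pending writer subtracts
// rwmutexMaxReaders, so a negative count signals "writer pending" while
// r+rwmutexMaxReaders is still the number of active readers. The RWMutex
// here is a throwaway used only to show the arithmetic; the function name
// is an assumption made for illustration only.
func readerCountExample() (writerPending bool, activeReaders int32) {
	var rw RWMutex
	rw.readerCount.Add(2)                       // two readers arrive
	r := rw.readerCount.Add(-rwmutexMaxReaders) // a writer announces itself
	return r < 0, r + rwmutexMaxReaders         // true, 2
}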
|
||||
|
||||
// TryLock tries to lock rw for writing and reports whether it succeeded.
|
||||
//
|
||||
// Note that while correct uses of TryLock do exist, they are rare,
|
||||
// and use of TryLock is often a sign of a deeper problem
|
||||
// in a particular use of mutexes.
|
||||
func (rw *RWMutex) TryLock() bool {
|
||||
if race.Enabled {
|
||||
_ = rw.w.state
|
||||
race.Disable()
|
||||
}
|
||||
if !rw.w.TryLock() {
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
}
|
||||
return false
|
||||
}
|
||||
if !rw.readerCount.CompareAndSwap(0, -rwmutexMaxReaders) {
|
||||
rw.w.Unlock()
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
}
|
||||
return false
|
||||
}
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
race.Acquire(unsafe.Pointer(&rw.readerSem))
|
||||
race.Acquire(unsafe.Pointer(&rw.writerSem))
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Unlock unlocks rw for writing. It is a run-time error if rw is
|
||||
// not locked for writing on entry to Unlock.
|
||||
//
|
||||
// As with Mutexes, a locked [RWMutex] is not associated with a particular
|
||||
// goroutine. One goroutine may [RWMutex.RLock] ([RWMutex.Lock]) a RWMutex and then
|
||||
// arrange for another goroutine to [RWMutex.RUnlock] ([RWMutex.Unlock]) it.
|
||||
func (rw *RWMutex) Unlock() {
|
||||
if race.Enabled {
|
||||
_ = rw.w.state
|
||||
race.Release(unsafe.Pointer(&rw.readerSem))
|
||||
race.Disable()
|
||||
}
|
||||
|
||||
// Announce to readers there is no active writer.
|
||||
r := rw.readerCount.Add(rwmutexMaxReaders)
|
||||
if r >= rwmutexMaxReaders {
|
||||
race.Enable()
|
||||
fatal("sync: Unlock of unlocked RWMutex")
|
||||
}
|
||||
// Unblock blocked readers, if any.
|
||||
for i := 0; i < int(r); i++ {
|
||||
runtime_Semrelease(&rw.readerSem, false, 0)
|
||||
}
|
||||
// Allow other writers to proceed.
|
||||
rw.w.Unlock()
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
}
|
||||
}
|
||||
|
||||
// syscall_hasWaitingReaders reports whether any goroutine is waiting
|
||||
// to acquire a read lock on rw. This exists because syscall.ForkLock
|
||||
// is an RWMutex, and we can't change that without breaking compatibility.
|
||||
// We don't need or want RWMutex semantics for ForkLock, and we use
|
||||
// this private API to avoid having to change the type of ForkLock.
|
||||
// For more details see the syscall package.
|
||||
//
|
||||
//go:linkname syscall_hasWaitingReaders syscall.hasWaitingReaders
|
||||
func syscall_hasWaitingReaders(rw *RWMutex) bool {
|
||||
r := rw.readerCount.Load()
|
||||
return r < 0 && r+rwmutexMaxReaders > 0
|
||||
}
|
||||
|
||||
// RLocker returns a [Locker] interface that implements
|
||||
// the [Locker.Lock] and [Locker.Unlock] methods by calling rw.RLock and rw.RUnlock.
|
||||
func (rw *RWMutex) RLocker() Locker {
|
||||
return (*rlocker)(rw)
|
||||
}
|
||||
|
||||
type rlocker RWMutex
|
||||
|
||||
func (r *rlocker) Lock() { (*RWMutex)(r).RLock() }
|
||||
func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }
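// Editorial sketch (not part of the original file): the Locker returned by
// RLocker forwards Lock and Unlock to RLock and RUnlock, which is handy
// for APIs that accept a Locker (for example NewCond). The function name
// is an assumption made for illustration only.
func rLockerExample() {
	var rw RWMutex
	l := rw.RLocker()
	l.Lock()   // read-locks rw
	l.Unlock() // read-unlocks rw
}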
|
||||
245
src/sync/rwmutex_test.go
Normal file
245
src/sync/rwmutex_test.go
Normal file
@@ -0,0 +1,245 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// GOMAXPROCS=10 go test
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
. "sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// There is a modified copy of this file in runtime/rwmutex_test.go.
|
||||
// If you make any changes here, see if you should make them there.
|
||||
|
||||
func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
|
||||
m.RLock()
|
||||
clocked <- true
|
||||
<-cunlock
|
||||
m.RUnlock()
|
||||
cdone <- true
|
||||
}
|
||||
|
||||
func doTestParallelReaders(numReaders, gomaxprocs int) {
|
||||
runtime.GOMAXPROCS(gomaxprocs)
|
||||
var m RWMutex
|
||||
clocked := make(chan bool)
|
||||
cunlock := make(chan bool)
|
||||
cdone := make(chan bool)
|
||||
for i := 0; i < numReaders; i++ {
|
||||
go parallelReader(&m, clocked, cunlock, cdone)
|
||||
}
|
||||
// Wait for all parallel RLock()s to succeed.
|
||||
for i := 0; i < numReaders; i++ {
|
||||
<-clocked
|
||||
}
|
||||
for i := 0; i < numReaders; i++ {
|
||||
cunlock <- true
|
||||
}
|
||||
// Wait for the goroutines to finish.
|
||||
for i := 0; i < numReaders; i++ {
|
||||
<-cdone
|
||||
}
|
||||
}
|
||||
|
||||
func TestParallelReaders(t *testing.T) {
|
||||
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
|
||||
doTestParallelReaders(1, 4)
|
||||
doTestParallelReaders(3, 4)
|
||||
doTestParallelReaders(4, 2)
|
||||
}
|
||||
|
||||
func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
|
||||
for i := 0; i < num_iterations; i++ {
|
||||
rwm.RLock()
|
||||
n := atomic.AddInt32(activity, 1)
|
||||
if n < 1 || n >= 10000 {
|
||||
rwm.RUnlock()
|
||||
panic(fmt.Sprintf("wlock(%d)\n", n))
|
||||
}
|
||||
for i := 0; i < 100; i++ {
|
||||
}
|
||||
atomic.AddInt32(activity, -1)
|
||||
rwm.RUnlock()
|
||||
}
|
||||
cdone <- true
|
||||
}
|
||||
|
||||
func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
|
||||
for i := 0; i < num_iterations; i++ {
|
||||
rwm.Lock()
|
||||
n := atomic.AddInt32(activity, 10000)
|
||||
if n != 10000 {
|
||||
rwm.Unlock()
|
||||
panic(fmt.Sprintf("wlock(%d)\n", n))
|
||||
}
|
||||
for i := 0; i < 100; i++ {
|
||||
}
|
||||
atomic.AddInt32(activity, -10000)
|
||||
rwm.Unlock()
|
||||
}
|
||||
cdone <- true
|
||||
}
|
||||
|
||||
func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
|
||||
runtime.GOMAXPROCS(gomaxprocs)
|
||||
// Number of active readers + 10000 * number of active writers.
|
||||
var activity int32
|
||||
var rwm RWMutex
|
||||
cdone := make(chan bool)
|
||||
go writer(&rwm, num_iterations, &activity, cdone)
|
||||
var i int
|
||||
for i = 0; i < numReaders/2; i++ {
|
||||
go reader(&rwm, num_iterations, &activity, cdone)
|
||||
}
|
||||
go writer(&rwm, num_iterations, &activity, cdone)
|
||||
for ; i < numReaders; i++ {
|
||||
go reader(&rwm, num_iterations, &activity, cdone)
|
||||
}
|
||||
// Wait for the 2 writers and all readers to finish.
|
||||
for i := 0; i < 2+numReaders; i++ {
|
||||
<-cdone
|
||||
}
|
||||
}
|
||||
|
||||
func TestRWMutex(t *testing.T) {
|
||||
var m RWMutex
|
||||
|
||||
m.Lock()
|
||||
if m.TryLock() {
|
||||
t.Fatalf("TryLock succeeded with mutex locked")
|
||||
}
|
||||
if m.TryRLock() {
|
||||
t.Fatalf("TryRLock succeeded with mutex locked")
|
||||
}
|
||||
m.Unlock()
|
||||
|
||||
if !m.TryLock() {
|
||||
t.Fatalf("TryLock failed with mutex unlocked")
|
||||
}
|
||||
m.Unlock()
|
||||
|
||||
if !m.TryRLock() {
|
||||
t.Fatalf("TryRLock failed with mutex unlocked")
|
||||
}
|
||||
if !m.TryRLock() {
|
||||
t.Fatalf("TryRLock failed with mutex rlocked")
|
||||
}
|
||||
if m.TryLock() {
|
||||
t.Fatalf("TryLock succeeded with mutex rlocked")
|
||||
}
|
||||
m.RUnlock()
|
||||
m.RUnlock()
|
||||
|
||||
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
|
||||
n := 1000
|
||||
if testing.Short() {
|
||||
n = 5
|
||||
}
|
||||
HammerRWMutex(1, 1, n)
|
||||
HammerRWMutex(1, 3, n)
|
||||
HammerRWMutex(1, 10, n)
|
||||
HammerRWMutex(4, 1, n)
|
||||
HammerRWMutex(4, 3, n)
|
||||
HammerRWMutex(4, 10, n)
|
||||
HammerRWMutex(10, 1, n)
|
||||
HammerRWMutex(10, 3, n)
|
||||
HammerRWMutex(10, 10, n)
|
||||
HammerRWMutex(10, 5, n)
|
||||
}
|
||||
|
||||
func TestRLocker(t *testing.T) {
|
||||
var wl RWMutex
|
||||
var rl Locker
|
||||
wlocked := make(chan bool, 1)
|
||||
rlocked := make(chan bool, 1)
|
||||
rl = wl.RLocker()
|
||||
n := 10
|
||||
go func() {
|
||||
for i := 0; i < n; i++ {
|
||||
rl.Lock()
|
||||
rl.Lock()
|
||||
rlocked <- true
|
||||
wl.Lock()
|
||||
wlocked <- true
|
||||
}
|
||||
}()
|
||||
for i := 0; i < n; i++ {
|
||||
<-rlocked
|
||||
rl.Unlock()
|
||||
select {
|
||||
case <-wlocked:
|
||||
t.Fatal("RLocker() didn't read-lock it")
|
||||
default:
|
||||
}
|
||||
rl.Unlock()
|
||||
<-wlocked
|
||||
select {
|
||||
case <-rlocked:
|
||||
t.Fatal("RLocker() didn't respect the write lock")
|
||||
default:
|
||||
}
|
||||
wl.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRWMutexUncontended(b *testing.B) {
|
||||
type PaddedRWMutex struct {
|
||||
RWMutex
|
||||
pad [32]uint32
|
||||
}
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var rwm PaddedRWMutex
|
||||
for pb.Next() {
|
||||
rwm.RLock()
|
||||
rwm.RLock()
|
||||
rwm.RUnlock()
|
||||
rwm.RUnlock()
|
||||
rwm.Lock()
|
||||
rwm.Unlock()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
|
||||
var rwm RWMutex
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
foo := 0
|
||||
for pb.Next() {
|
||||
foo++
|
||||
if foo%writeRatio == 0 {
|
||||
rwm.Lock()
|
||||
rwm.Unlock()
|
||||
} else {
|
||||
rwm.RLock()
|
||||
for i := 0; i != localWork; i += 1 {
|
||||
foo *= 2
|
||||
foo /= 2
|
||||
}
|
||||
rwm.RUnlock()
|
||||
}
|
||||
}
|
||||
_ = foo
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkRWMutexWrite100(b *testing.B) {
|
||||
benchmarkRWMutex(b, 0, 100)
|
||||
}
|
||||
|
||||
func BenchmarkRWMutexWrite10(b *testing.B) {
|
||||
benchmarkRWMutex(b, 0, 10)
|
||||
}
|
||||
|
||||
func BenchmarkRWMutexWorkWrite100(b *testing.B) {
|
||||
benchmarkRWMutex(b, 100, 100)
|
||||
}
|
||||
|
||||
func BenchmarkRWMutexWorkWrite10(b *testing.B) {
|
||||
benchmarkRWMutex(b, 100, 10)
|
||||
}
|
||||
129
src/sync/waitgroup.go
Normal file
129
src/sync/waitgroup.go
Normal file
@@ -0,0 +1,129 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"internal/race"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// A WaitGroup waits for a collection of goroutines to finish.
|
||||
// The main goroutine calls [WaitGroup.Add] to set the number of
|
||||
// goroutines to wait for. Then each of the goroutines
|
||||
// runs and calls [WaitGroup.Done] when finished. At the same time,
|
||||
// [WaitGroup.Wait] can be used to block until all goroutines have finished.
|
||||
//
|
||||
// A WaitGroup must not be copied after first use.
|
||||
//
|
||||
// In the terminology of [the Go memory model], a call to [WaitGroup.Done]
|
||||
// “synchronizes before” the return of any Wait call that it unblocks.
|
||||
//
|
||||
// [the Go memory model]: https://go.dev/ref/mem
|
||||
type WaitGroup struct {
|
||||
noCopy noCopy
|
||||
|
||||
state atomic.Uint64 // high 32 bits are counter, low 32 bits are waiter count.
|
||||
sema uint32
|
||||
}
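// Editorial sketch (not part of the original file) of the canonical
// pattern described above: Add before starting each goroutine, Done
// (usually deferred) inside it, and Wait in the coordinating goroutine.
// The names in the example are assumptions made for illustration only.
func waitGroupExample() {
	var wg WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1) // must happen before the goroutine starts
		go func() {
			defer wg.Done()
			// ... do work ...
		}()
	}
	wg.Wait() // blocks until all three Done calls have happened
}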
|
||||
|
||||
// Add adds delta, which may be negative, to the [WaitGroup] counter.
|
||||
// If the counter becomes zero, all goroutines blocked on [WaitGroup.Wait] are released.
|
||||
// If the counter goes negative, Add panics.
|
||||
//
|
||||
// Note that calls with a positive delta that occur when the counter is zero
|
||||
// must happen before a Wait. Calls with a negative delta, or calls with a
|
||||
// positive delta that start when the counter is greater than zero, may happen
|
||||
// at any time.
|
||||
// Typically this means the calls to Add should execute before the statement
|
||||
// creating the goroutine or other event to be waited for.
|
||||
// If a WaitGroup is reused to wait for several independent sets of events,
|
||||
// new Add calls must happen after all previous Wait calls have returned.
|
||||
// See the WaitGroup example.
|
||||
func (wg *WaitGroup) Add(delta int) {
|
||||
if race.Enabled {
|
||||
if delta < 0 {
|
||||
// Synchronize decrements with Wait.
|
||||
race.ReleaseMerge(unsafe.Pointer(wg))
|
||||
}
|
||||
race.Disable()
|
||||
defer race.Enable()
|
||||
}
|
||||
state := wg.state.Add(uint64(delta) << 32)
|
||||
v := int32(state >> 32)
|
||||
w := uint32(state)
|
||||
if race.Enabled && delta > 0 && v == int32(delta) {
|
||||
// The first increment must be synchronized with Wait.
|
||||
// Need to model this as a read, because there can be
|
||||
// several concurrent wg.counter transitions from 0.
|
||||
race.Read(unsafe.Pointer(&wg.sema))
|
||||
}
|
||||
if v < 0 {
|
||||
panic("sync: negative WaitGroup counter")
|
||||
}
|
||||
if w != 0 && delta > 0 && v == int32(delta) {
|
||||
panic("sync: WaitGroup misuse: Add called concurrently with Wait")
|
||||
}
|
||||
if v > 0 || w == 0 {
|
||||
return
|
||||
}
|
||||
// This goroutine has set counter to 0 when waiters > 0.
|
||||
// Now there can't be concurrent mutations of state:
|
||||
// - Adds must not happen concurrently with Wait,
|
||||
// - Wait does not increment waiters if it sees counter == 0.
|
||||
// Still do a cheap sanity check to detect WaitGroup misuse.
|
||||
if wg.state.Load() != state {
|
||||
panic("sync: WaitGroup misuse: Add called concurrently with Wait")
|
||||
}
|
||||
// Reset waiters count to 0.
|
||||
wg.state.Store(0)
|
||||
for ; w != 0; w-- {
|
||||
runtime_Semrelease(&wg.sema, false, 0)
|
||||
}
|
||||
}
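// Editorial sketch (not part of the original file) of the state encoding
// used by Add and Wait above: the counter lives in the high 32 bits and
// the waiter count in the low 32 bits of one atomic.Uint64, so both can be
// updated and read together. The WaitGroup here is a throwaway used only
// to show the arithmetic; the function name is an assumption made for
// illustration only.
func stateExample() (counter int32, waiters uint32) {
	var wg WaitGroup
	wg.state.Add(2 << 32) // counter += 2
	wg.state.Add(1)       // waiters += 1
	s := wg.state.Load()
	return int32(s >> 32), uint32(s) // 2, 1
}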
|
||||
|
||||
// Done decrements the [WaitGroup] counter by one.
|
||||
func (wg *WaitGroup) Done() {
|
||||
wg.Add(-1)
|
||||
}
|
||||
|
||||
// Wait blocks until the [WaitGroup] counter is zero.
|
||||
func (wg *WaitGroup) Wait() {
|
||||
if race.Enabled {
|
||||
race.Disable()
|
||||
}
|
||||
for {
|
||||
state := wg.state.Load()
|
||||
v := int32(state >> 32)
|
||||
w := uint32(state)
|
||||
if v == 0 {
|
||||
// Counter is 0, no need to wait.
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
race.Acquire(unsafe.Pointer(wg))
|
||||
}
|
||||
return
|
||||
}
|
||||
// Increment waiters count.
|
||||
if wg.state.CompareAndSwap(state, state+1) {
|
||||
if race.Enabled && w == 0 {
|
||||
// Wait must be synchronized with the first Add.
|
||||
// Need to model this as a write to race with the read in Add.
|
||||
// As a consequence, can do the write only for the first waiter,
|
||||
// otherwise concurrent Waits will race with each other.
|
||||
race.Write(unsafe.Pointer(&wg.sema))
|
||||
}
|
||||
runtime_Semacquire(&wg.sema)
|
||||
if wg.state.Load() != 0 {
|
||||
panic("sync: WaitGroup is reused before previous Wait has returned")
|
||||
}
|
||||
if race.Enabled {
|
||||
race.Enable()
|
||||
race.Acquire(unsafe.Pointer(wg))
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
175
src/sync/waitgroup_test.go
Normal file
175
src/sync/waitgroup_test.go
Normal file
@@ -0,0 +1,175 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sync_test
|
||||
|
||||
import (
|
||||
. "sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testWaitGroup(t *testing.T, wg1 *WaitGroup, wg2 *WaitGroup) {
|
||||
n := 16
|
||||
wg1.Add(n)
|
||||
wg2.Add(n)
|
||||
exited := make(chan bool, n)
|
||||
for i := 0; i != n; i++ {
|
||||
go func() {
|
||||
wg1.Done()
|
||||
wg2.Wait()
|
||||
exited <- true
|
||||
}()
|
||||
}
|
||||
wg1.Wait()
|
||||
for i := 0; i != n; i++ {
|
||||
select {
|
||||
case <-exited:
|
||||
t.Fatal("WaitGroup released group too soon")
|
||||
default:
|
||||
}
|
||||
wg2.Done()
|
||||
}
|
||||
for i := 0; i != n; i++ {
|
||||
<-exited // Will block if barrier fails to unlock someone.
|
||||
}
|
||||
}
|
||||
|
||||
func TestWaitGroup(t *testing.T) {
|
||||
wg1 := &WaitGroup{}
|
||||
wg2 := &WaitGroup{}
|
||||
|
||||
// Run the same test a few times to ensure barrier is in a proper state.
|
||||
for i := 0; i != 8; i++ {
|
||||
testWaitGroup(t, wg1, wg2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWaitGroupMisuse(t *testing.T) {
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != "sync: negative WaitGroup counter" {
|
||||
t.Fatalf("Unexpected panic: %#v", err)
|
||||
}
|
||||
}()
|
||||
wg := &WaitGroup{}
|
||||
wg.Add(1)
|
||||
wg.Done()
|
||||
wg.Done()
|
||||
t.Fatal("Should panic")
|
||||
}
|
||||
|
||||
func TestWaitGroupRace(t *testing.T) {
|
||||
// Run this test for about 1ms.
|
||||
for i := 0; i < 1000; i++ {
|
||||
wg := &WaitGroup{}
|
||||
n := new(int32)
|
||||
// spawn goroutine 1
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
atomic.AddInt32(n, 1)
|
||||
wg.Done()
|
||||
}()
|
||||
// spawn goroutine 2
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
atomic.AddInt32(n, 1)
|
||||
wg.Done()
|
||||
}()
|
||||
// Wait for goroutine 1 and 2
|
||||
wg.Wait()
|
||||
if atomic.LoadInt32(n) != 2 {
|
||||
t.Fatal("Spurious wakeup from Wait")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWaitGroupAlign(t *testing.T) {
|
||||
type X struct {
|
||||
x byte
|
||||
wg WaitGroup
|
||||
}
|
||||
var x X
|
||||
x.wg.Add(1)
|
||||
go func(x *X) {
|
||||
x.wg.Done()
|
||||
}(&x)
|
||||
x.wg.Wait()
|
||||
}
|
||||
|
||||
func BenchmarkWaitGroupUncontended(b *testing.B) {
|
||||
type PaddedWaitGroup struct {
|
||||
WaitGroup
|
||||
pad [128]uint8
|
||||
}
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var wg PaddedWaitGroup
|
||||
for pb.Next() {
|
||||
wg.Add(1)
|
||||
wg.Done()
|
||||
wg.Wait()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
|
||||
var wg WaitGroup
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
foo := 0
|
||||
for pb.Next() {
|
||||
wg.Add(1)
|
||||
for i := 0; i < localWork; i++ {
|
||||
foo *= 2
|
||||
foo /= 2
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
_ = foo
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkWaitGroupAddDone(b *testing.B) {
|
||||
benchmarkWaitGroupAddDone(b, 0)
|
||||
}
|
||||
|
||||
func BenchmarkWaitGroupAddDoneWork(b *testing.B) {
|
||||
benchmarkWaitGroupAddDone(b, 100)
|
||||
}
|
||||
|
||||
func benchmarkWaitGroupWait(b *testing.B, localWork int) {
|
||||
var wg WaitGroup
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
foo := 0
|
||||
for pb.Next() {
|
||||
wg.Wait()
|
||||
for i := 0; i < localWork; i++ {
|
||||
foo *= 2
|
||||
foo /= 2
|
||||
}
|
||||
}
|
||||
_ = foo
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkWaitGroupWait(b *testing.B) {
|
||||
benchmarkWaitGroupWait(b, 0)
|
||||
}
|
||||
|
||||
func BenchmarkWaitGroupWaitWork(b *testing.B) {
|
||||
benchmarkWaitGroupWait(b, 100)
|
||||
}
|
||||
|
||||
func BenchmarkWaitGroupActuallyWait(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
var wg WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
})
|
||||
}
|
||||