build: separate compiler and libs

Li Jie
2025-01-07 21:49:08 +08:00
parent b0123567cd
commit 1172e5bdce
559 changed files with 190 additions and 176 deletions


@@ -0,0 +1,139 @@
/*
* Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package atomic
import (
"unsafe"
)
const (
LLGoPackage = true
)
type valtype interface {
~int | ~uint | ~uintptr | ~int32 | ~uint32 | ~int64 | ~uint64 | ~unsafe.Pointer
}
//go:linkname SwapInt32 llgo.atomicXchg
func SwapInt32(addr *int32, new int32) (old int32)
//go:linkname SwapInt64 llgo.atomicXchg
func SwapInt64(addr *int64, new int64) (old int64)
//go:linkname SwapUint32 llgo.atomicXchg
func SwapUint32(addr *uint32, new uint32) (old uint32)
//go:linkname SwapUint64 llgo.atomicXchg
func SwapUint64(addr *uint64, new uint64) (old uint64)
//go:linkname SwapUintptr llgo.atomicXchg
func SwapUintptr(addr *uintptr, new uintptr) (old uintptr)
//go:linkname SwapPointer llgo.atomicXchg
func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)
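// NB: the Go body of atomicCmpXchg below is only a placeholder; the
// llgo:link directive binds the call to the llgo.atomicCmpXchg intrinsic
// at link time, so `return old, false` is never what actually runs.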
// llgo:link atomicCmpXchg llgo.atomicCmpXchg
func atomicCmpXchg[T valtype](ptr *T, old, new T) (T, bool) { return old, false }
func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool) {
_, swapped = atomicCmpXchg(addr, old, new)
return
}
func CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool) {
_, swapped = atomicCmpXchg(addr, old, new)
return
}
func CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool) {
_, swapped = atomicCmpXchg(addr, old, new)
return
}
func CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool) {
_, swapped = atomicCmpXchg(addr, old, new)
return
}
func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) (swapped bool) {
_, swapped = atomicCmpXchg(addr, old, new)
return
}
func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool) {
_, swapped = atomicCmpXchg(addr, old, new)
return
}
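// NB: llgo.atomicAdd returns the value previously stored (as LLVM's
// atomicrmw add does), which is why the Add* wrappers below add delta
// once more to return the new value, matching sync/atomic semantics.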
// llgo:link atomicAdd llgo.atomicAdd
func atomicAdd[T valtype](ptr *T, v T) T { return v }
func AddInt32(addr *int32, delta int32) (new int32) {
return atomicAdd(addr, delta) + delta
}
func AddUint32(addr *uint32, delta uint32) (new uint32) {
return atomicAdd(addr, delta) + delta
}
func AddInt64(addr *int64, delta int64) (new int64) {
return atomicAdd(addr, delta) + delta
}
func AddUint64(addr *uint64, delta uint64) (new uint64) {
return atomicAdd(addr, delta) + delta
}
func AddUintptr(addr *uintptr, delta uintptr) (new uintptr) {
return atomicAdd(addr, delta) + delta
}
//go:linkname LoadInt32 llgo.atomicLoad
func LoadInt32(addr *int32) (val int32)
//go:linkname LoadInt64 llgo.atomicLoad
func LoadInt64(addr *int64) (val int64)
//go:linkname LoadUint32 llgo.atomicLoad
func LoadUint32(addr *uint32) (val uint32)
//go:linkname LoadUint64 llgo.atomicLoad
func LoadUint64(addr *uint64) (val uint64)
//go:linkname LoadUintptr llgo.atomicLoad
func LoadUintptr(addr *uintptr) (val uintptr)
//go:linkname LoadPointer llgo.atomicLoad
func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
//go:linkname StoreInt32 llgo.atomicStore
func StoreInt32(addr *int32, val int32)
//go:linkname StoreInt64 llgo.atomicStore
func StoreInt64(addr *int64, val int64)
//go:linkname StoreUint32 llgo.atomicStore
func StoreUint32(addr *uint32, val uint32)
//go:linkname StoreUint64 llgo.atomicStore
func StoreUint64(addr *uint64, val uint64)
//go:linkname StoreUintptr llgo.atomicStore
func StoreUintptr(addr *uintptr, val uintptr)
//go:linkname StorePointer llgo.atomicStore
func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer)
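
A minimal usage sketch of the API above. Because these signatures mirror the standard sync/atomic package, the sketch is written against it and behaves identically:

package main

import (
	"fmt"
	"sync/atomic" // same signatures as the package above
)

func main() {
	var n int64
	atomic.StoreInt64(&n, 40)
	fmt.Println(atomic.AddInt64(&n, 2))               // 42: Add returns the new value
	fmt.Println(atomic.SwapInt64(&n, 7))              // 42: Swap returns the old value
	fmt.Println(atomic.CompareAndSwapInt64(&n, 7, 8)) // true
	fmt.Println(atomic.LoadInt64(&n))                 // 8
}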


@@ -0,0 +1,176 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package atomic
import (
"unsafe"
)
type Value struct {
v any
}
// efaceWords is the internal representation of interface{}.
type efaceWords struct {
typ unsafe.Pointer
data unsafe.Pointer
}
// Load returns the value set by the most recent Store.
// It returns nil if there has been no call to Store for this Value.
func (v *Value) Load() (val any) {
vp := (*efaceWords)(unsafe.Pointer(v))
typ := LoadPointer(&vp.typ)
if typ == nil || typ == unsafe.Pointer(&firstStoreInProgress) {
// First store not yet completed.
return nil
}
data := LoadPointer(&vp.data)
vlp := (*efaceWords)(unsafe.Pointer(&val))
vlp.typ = typ
vlp.data = data
return
}
var firstStoreInProgress byte
// Store sets the value of the Value v to val.
// All calls to Store for a given Value must use values of the same concrete type.
// Store of an inconsistent type panics, as does Store(nil).
func (v *Value) Store(val any) {
if val == nil {
panic("sync/atomic: store of nil value into Value")
}
vp := (*efaceWords)(unsafe.Pointer(v))
vlp := (*efaceWords)(unsafe.Pointer(&val))
for {
typ := LoadPointer(&vp.typ)
if typ == nil {
// Attempt to start first store.
// Upstream disables preemption here (runtime_procPin) so that other
// goroutines can spin-wait for completion; this port omits the pin.
if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
continue
}
// Complete first store.
StorePointer(&vp.data, vlp.data)
StorePointer(&vp.typ, vlp.typ)
return
}
if typ == unsafe.Pointer(&firstStoreInProgress) {
// First store in progress. Wait.
// Upstream pins the storing goroutine so this spin stays short;
// this port simply spins until the first store completes.
continue
}
// First store completed. Check type and overwrite data.
if typ != vlp.typ {
panic("sync/atomic: store of inconsistently typed value into Value")
}
StorePointer(&vp.data, vlp.data)
return
}
}
// Swap stores new into Value and returns the previous value. It returns nil if
// the Value is empty.
//
// All calls to Swap for a given Value must use values of the same concrete
// type. Swap of an inconsistent type panics, as does Swap(nil).
func (v *Value) Swap(new any) (old any) {
if new == nil {
panic("sync/atomic: swap of nil value into Value")
}
vp := (*efaceWords)(unsafe.Pointer(v))
np := (*efaceWords)(unsafe.Pointer(&new))
for {
typ := LoadPointer(&vp.typ)
if typ == nil {
// Attempt to start first store.
// Upstream disables preemption here (runtime_procPin) so that other
// goroutines can spin-wait for completion, and so that the GC does
// not see the fake type accidentally; this port omits the pin.
if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
continue
}
// Complete first store.
StorePointer(&vp.data, np.data)
StorePointer(&vp.typ, np.typ)
return nil
}
if typ == unsafe.Pointer(&firstStoreInProgress) {
// First store in progress. Wait.
// Upstream pins the storing goroutine so this spin stays short;
// this port simply spins until the first store completes.
continue
}
// First store completed. Check type and overwrite data.
if typ != np.typ {
panic("sync/atomic: swap of inconsistently typed value into Value")
}
op := (*efaceWords)(unsafe.Pointer(&old))
op.typ, op.data = np.typ, SwapPointer(&vp.data, np.data)
return old
}
}
// CompareAndSwap executes the compare-and-swap operation for the Value.
//
// All calls to CompareAndSwap for a given Value must use values of the same
// concrete type. CompareAndSwap of an inconsistent type panics, as does
// CompareAndSwap(old, nil).
func (v *Value) CompareAndSwap(old, new any) (swapped bool) {
if new == nil {
panic("sync/atomic: compare and swap of nil value into Value")
}
vp := (*efaceWords)(unsafe.Pointer(v))
np := (*efaceWords)(unsafe.Pointer(&new))
op := (*efaceWords)(unsafe.Pointer(&old))
if op.typ != nil && np.typ != op.typ {
panic("sync/atomic: compare and swap of inconsistently typed values")
}
for {
typ := LoadPointer(&vp.typ)
if typ == nil {
if old != nil {
return false
}
// Attempt to start first store.
// Upstream disables preemption here (runtime_procPin) so that other
// goroutines can spin-wait for completion, and so that the GC does
// not see the fake type accidentally; this port omits the pin.
if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
continue
}
// Complete first store.
StorePointer(&vp.data, np.data)
StorePointer(&vp.typ, np.typ)
return true
}
if typ == unsafe.Pointer(&firstStoreInProgress) {
// First store in progress. Wait.
// Upstream pins the storing goroutine so this spin stays short;
// this port simply spins until the first store completes.
continue
}
// First store completed. Check type and overwrite data.
if typ != np.typ {
panic("sync/atomic: compare and swap of inconsistently typed value into Value")
}
// Compare old and current via runtime equality check.
// This allows value types to be compared, something
// not offered by the package functions.
// CompareAndSwapPointer below only ensures vp.data
// has not changed since LoadPointer.
data := LoadPointer(&vp.data)
var i any
(*efaceWords)(unsafe.Pointer(&i)).typ = typ
(*efaceWords)(unsafe.Pointer(&i)).data = data
if i != old {
return false
}
return CompareAndSwapPointer(&vp.data, data, np.data)
}
}
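
A short sketch of the Value contract implemented above (identical to the standard sync/atomic.Value): the first Store fixes the concrete type, and later calls must match it.

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var v atomic.Value
	fmt.Println(v.Load() == nil) // true: nothing stored yet

	v.Store("hello")
	fmt.Println(v.Swap("world"))                // hello: Swap returns the old value
	fmt.Println(v.CompareAndSwap("world", "!")) // true
	fmt.Println(v.Load())                       // !
	// v.Store(42) would panic: inconsistently typed value
}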


@@ -0,0 +1,18 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sync
// noCopy may be added to structs which must not be copied
// after the first use.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
//
// Note that it must not be embedded, due to the Lock and Unlock methods.
type noCopy struct{}
// Lock is a no-op used by -copylocks checker from `go vet`.
func (*noCopy) Lock() {}
func (*noCopy) Unlock() {}
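
A sketch of how noCopy is meant to be used inside this package; copyGuarded is a hypothetical type, and go vet's copylocks checker flags any copy of it because noCopy carries the Lock/Unlock methods:

// hypothetical type in this package, guarded against copying
type copyGuarded struct {
	noCopy noCopy // named field, not embedded (see the note above)
	n      int
}

func byValue(g copyGuarded) {} // go vet: "passes lock by value: copyGuarded contains noCopy"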


@@ -0,0 +1,515 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sync
import (
"sync/atomic"
)
// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
//
// In the terminology of the Go memory model, Map arranges that a write operation
// “synchronizes before” any read operation that observes the effect of the write, where
// read and write operations are defined as follows.
// Load, LoadAndDelete, LoadOrStore, Swap, CompareAndSwap, and CompareAndDelete
// are read operations; Delete, LoadAndDelete, Store, and Swap are write operations;
// LoadOrStore is a write operation when it returns loaded set to false;
// CompareAndSwap is a write operation when it returns swapped set to true;
// and CompareAndDelete is a write operation when it returns deleted set to true.
type Map struct {
mu Mutex
// read contains the portion of the map's contents that are safe for
// concurrent access (with or without mu held).
//
// The read field itself is always safe to load, but must only be stored with
// mu held.
//
// Entries stored in read may be updated concurrently without mu, but updating
// a previously-expunged entry requires that the entry be copied to the dirty
// map and unexpunged with mu held.
read atomic.Pointer[readOnly]
// dirty contains the portion of the map's contents that require mu to be
// held. To ensure that the dirty map can be promoted to the read map quickly,
// it also includes all of the non-expunged entries in the read map.
//
// Expunged entries are not stored in the dirty map. An expunged entry in the
// clean map must be unexpunged and added to the dirty map before a new value
// can be stored to it.
//
// If the dirty map is nil, the next write to the map will initialize it by
// making a shallow copy of the clean map, omitting stale entries.
dirty map[any]*entry
// misses counts the number of loads since the read map was last updated that
// needed to lock mu to determine whether the key was present.
//
// Once enough misses have occurred to cover the cost of copying the dirty
// map, the dirty map will be promoted to the read map (in the unamended
// state) and the next store to the map will make a new dirty copy.
misses int
}
// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
m map[any]*entry
amended bool // true if the dirty map contains some key not in m.
}
// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = new(any)
// An entry is a slot in the map corresponding to a particular key.
type entry struct {
// p points to the interface{} value stored for the entry.
//
// If p == nil, the entry has been deleted, and either m.dirty == nil or
// m.dirty[key] is e.
//
// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
// is missing from m.dirty.
//
// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
// != nil, in m.dirty[key].
//
// An entry can be deleted by atomic replacement with nil: when m.dirty is
// next created, it will atomically replace nil with expunged and leave
// m.dirty[key] unset.
//
// An entry's associated value can be updated by atomic replacement, provided
// p != expunged. If p == expunged, an entry's associated value can be updated
// only after first setting m.dirty[key] = e so that lookups using the dirty
// map find the entry.
p atomic.Pointer[any]
}
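// In summary, entry.p has three states, with these transitions
// (implemented further below):
//
//	p == nil        deleted; m.dirty == nil, or m.dirty[key] is still e
//	p == expunged   deleted; m.dirty != nil and does not contain the entry
//	p == &value     live; in m.read.m[key] and, if m.dirty != nil, m.dirty[key]
//
//	(*entry).delete:           &value -> nil      (atomic CAS)
//	(*entry).tryExpungeLocked: nil -> expunged    (while building m.dirty)
//	(*entry).unexpungeLocked:  expunged -> nil    (before re-adding to m.dirty)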
func newEntry(i any) *entry {
e := &entry{}
e.p.Store(&i)
return e
}
func (m *Map) loadReadOnly() readOnly {
if p := m.read.Load(); p != nil {
return *p
}
return readOnly{}
}
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key any) (value any, ok bool) {
read := m.loadReadOnly()
e, ok := read.m[key]
if !ok && read.amended {
m.mu.Lock()
// Avoid reporting a spurious miss if m.dirty got promoted while we were
// blocked on m.mu. (If further loads of the same key will not miss, it's
// not worth copying the dirty map for this key.)
read = m.loadReadOnly()
e, ok = read.m[key]
if !ok && read.amended {
e, ok = m.dirty[key]
// Regardless of whether the entry was present, record a miss: this key
// will take the slow path until the dirty map is promoted to the read
// map.
m.missLocked()
}
m.mu.Unlock()
}
if !ok {
return nil, false
}
return e.load()
}
func (e *entry) load() (value any, ok bool) {
p := e.p.Load()
if p == nil || p == expunged {
return nil, false
}
return *p, true
}
// Store sets the value for a key.
func (m *Map) Store(key, value any) {
_, _ = m.Swap(key, value)
}
// tryCompareAndSwap compares the entry with the given old value and swaps
// it with the new value if the entry is equal to the old value and has
// not been expunged.
//
// If the entry is expunged, tryCompareAndSwap returns false and leaves
// the entry unchanged.
func (e *entry) tryCompareAndSwap(old, new any) bool {
p := e.p.Load()
if p == nil || p == expunged || *p != old {
return false
}
// Copy the interface after the first load to make this method more amenable
// to escape analysis: if the comparison fails from the start, we shouldn't
// bother heap-allocating an interface value to store.
nc := new
for {
if e.p.CompareAndSwap(p, &nc) {
return true
}
p = e.p.Load()
if p == nil || p == expunged || *p != old {
return false
}
}
}
// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
return e.p.CompareAndSwap(expunged, nil)
}
// swapLocked unconditionally swaps a value into the entry.
//
// The entry must be known not to be expunged.
func (e *entry) swapLocked(i *any) *any {
return e.p.Swap(i)
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
// Avoid locking if it's a clean hit.
read := m.loadReadOnly()
if e, ok := read.m[key]; ok {
actual, loaded, ok := e.tryLoadOrStore(value)
if ok {
return actual, loaded
}
}
m.mu.Lock()
read = m.loadReadOnly()
if e, ok := read.m[key]; ok {
if e.unexpungeLocked() {
m.dirty[key] = e
}
actual, loaded, _ = e.tryLoadOrStore(value)
} else if e, ok := m.dirty[key]; ok {
actual, loaded, _ = e.tryLoadOrStore(value)
m.missLocked()
} else {
if !read.amended {
// We're adding the first new key to the dirty map.
// Make sure it is allocated and mark the read-only map as incomplete.
m.dirtyLocked()
m.read.Store(&readOnly{m: read.m, amended: true})
}
m.dirty[key] = newEntry(value)
actual, loaded = value, false
}
m.mu.Unlock()
return actual, loaded
}
// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) {
p := e.p.Load()
if p == expunged {
return nil, false, false
}
if p != nil {
return *p, true, true
}
// Copy the interface after the first load to make this method more amenable
// to escape analysis: if we hit the "load" path or the entry is expunged, we
// shouldn't bother heap-allocating.
ic := i
for {
if e.p.CompareAndSwap(nil, &ic) {
return i, false, true
}
p = e.p.Load()
if p == expunged {
return nil, false, false
}
if p != nil {
return *p, true, true
}
}
}
// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
read := m.loadReadOnly()
e, ok := read.m[key]
if !ok && read.amended {
m.mu.Lock()
read = m.loadReadOnly()
e, ok = read.m[key]
if !ok && read.amended {
e, ok = m.dirty[key]
delete(m.dirty, key)
// Regardless of whether the entry was present, record a miss: this key
// will take the slow path until the dirty map is promoted to the read
// map.
m.missLocked()
}
m.mu.Unlock()
}
if ok {
return e.delete()
}
return nil, false
}
// Delete deletes the value for a key.
func (m *Map) Delete(key any) {
m.LoadAndDelete(key)
}
func (e *entry) delete() (value any, ok bool) {
for {
p := e.p.Load()
if p == nil || p == expunged {
return nil, false
}
if e.p.CompareAndSwap(p, nil) {
return *p, true
}
}
}
// trySwap swaps a value if the entry has not been expunged.
//
// If the entry is expunged, trySwap returns false and leaves the entry
// unchanged.
func (e *entry) trySwap(i *any) (*any, bool) {
for {
p := e.p.Load()
if p == expunged {
return nil, false
}
if e.p.CompareAndSwap(p, i) {
return p, true
}
}
}
// Swap swaps the value for a key and returns the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map) Swap(key, value any) (previous any, loaded bool) {
read := m.loadReadOnly()
if e, ok := read.m[key]; ok {
if v, ok := e.trySwap(&value); ok {
if v == nil {
return nil, false
}
return *v, true
}
}
m.mu.Lock()
read = m.loadReadOnly()
if e, ok := read.m[key]; ok {
if e.unexpungeLocked() {
// The entry was previously expunged, which implies that there is a
// non-nil dirty map and this entry is not in it.
m.dirty[key] = e
}
if v := e.swapLocked(&value); v != nil {
loaded = true
previous = *v
}
} else if e, ok := m.dirty[key]; ok {
if v := e.swapLocked(&value); v != nil {
loaded = true
previous = *v
}
} else {
if !read.amended {
// We're adding the first new key to the dirty map.
// Make sure it is allocated and mark the read-only map as incomplete.
m.dirtyLocked()
m.read.Store(&readOnly{m: read.m, amended: true})
}
m.dirty[key] = newEntry(value)
}
m.mu.Unlock()
return previous, loaded
}
// CompareAndSwap swaps the old and new values for key
// if the value stored in the map is equal to old.
// The old value must be of a comparable type.
func (m *Map) CompareAndSwap(key, old, new any) bool {
read := m.loadReadOnly()
if e, ok := read.m[key]; ok {
return e.tryCompareAndSwap(old, new)
} else if !read.amended {
return false // No existing value for key.
}
m.mu.Lock()
defer m.mu.Unlock()
read = m.loadReadOnly()
swapped := false
if e, ok := read.m[key]; ok {
swapped = e.tryCompareAndSwap(old, new)
} else if e, ok := m.dirty[key]; ok {
swapped = e.tryCompareAndSwap(old, new)
// We needed to lock mu in order to load the entry for key,
// and the operation didn't change the set of keys in the map
// (so it would be made more efficient by promoting the dirty
// map to read-only).
// Count it as a miss so that we will eventually switch to the
// more efficient steady state.
m.missLocked()
}
return swapped
}
// CompareAndDelete deletes the entry for key if its value is equal to old.
// The old value must be of a comparable type.
//
// If there is no current value for key in the map, CompareAndDelete
// returns false (even if the old value is the nil interface value).
func (m *Map) CompareAndDelete(key, old any) (deleted bool) {
read := m.loadReadOnly()
e, ok := read.m[key]
if !ok && read.amended {
m.mu.Lock()
read = m.loadReadOnly()
e, ok = read.m[key]
if !ok && read.amended {
e, ok = m.dirty[key]
// Don't delete key from m.dirty: we still need to do the “compare” part
// of the operation. The entry will eventually be expunged when the
// dirty map is promoted to the read map.
//
// Regardless of whether the entry was present, record a miss: this key
// will take the slow path until the dirty map is promoted to the read
// map.
m.missLocked()
}
m.mu.Unlock()
}
for ok {
p := e.p.Load()
if p == nil || p == expunged || *p != old {
return false
}
if e.p.CompareAndSwap(p, nil) {
return true
}
}
return false
}
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently (including by f), Range may reflect any
// mapping for that key from any point during the Range call. Range does not
// block other methods on the receiver; even f itself may call any method on m.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value any) bool) {
// We need to be able to iterate over all of the keys that were already
// present at the start of the call to Range.
// If read.amended is false, then read.m satisfies that property without
// requiring us to hold m.mu for a long time.
read := m.loadReadOnly()
if read.amended {
// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
// (assuming the caller does not break out early), so a call to Range
// amortizes an entire copy of the map: we can promote the dirty copy
// immediately!
m.mu.Lock()
read = m.loadReadOnly()
if read.amended {
read = readOnly{m: m.dirty}
m.read.Store(&read)
m.dirty = nil
m.misses = 0
}
m.mu.Unlock()
}
for k, e := range read.m {
v, ok := e.load()
if !ok {
continue
}
if !f(k, v) {
break
}
}
}
func (m *Map) missLocked() {
m.misses++
if m.misses < len(m.dirty) {
return
}
m.read.Store(&readOnly{m: m.dirty})
m.dirty = nil
m.misses = 0
}
func (m *Map) dirtyLocked() {
if m.dirty != nil {
return
}
read := m.loadReadOnly()
m.dirty = make(map[any]*entry, len(read.m))
for k, e := range read.m {
if !e.tryExpungeLocked() {
m.dirty[k] = e
}
}
}
func (e *entry) tryExpungeLocked() (isExpunged bool) {
p := e.p.Load()
for p == nil {
if e.p.CompareAndSwap(nil, expunged) {
return true
}
p = e.p.Load()
}
return p == expunged
}
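
The surface above matches the standard sync.Map, so a usage sketch written against the standard package exercises the same semantics:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var m sync.Map
	m.Store("a", 1)
	if v, loaded := m.LoadOrStore("a", 2); loaded {
		fmt.Println("kept existing:", v) // kept existing: 1
	}
	m.Range(func(k, v any) bool {
		fmt.Println(k, v) // a 1
		return true
	})
	if v, loaded := m.LoadAndDelete("a"); loaded {
		fmt.Println("deleted:", v) // deleted: 1
	}
}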


@@ -0,0 +1,33 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sync
import "unsafe"
type Pool struct {
noCopy noCopy
local unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
localSize uintptr // size of the local array
victim unsafe.Pointer // local from previous cycle
victimSize uintptr // size of victims array
// New optionally specifies a function to generate
// a value when Get would otherwise return nil.
// It may not be changed concurrently with calls to Get.
New func() any
}
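// Put is a stub in this commit: x is discarded rather than recycled.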
func (p *Pool) Put(x any) {
}
// TODO(xsw):
func (p *Pool) Get() any {
if p.New != nil {
return p.New()
}
return nil
}
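
With Put a no-op, this stub's Get reduces to calling New on every call (or returning nil when New is unset). A small sketch of the observable behavior, with hypothetical names:

var bufPool = Pool{New: func() any { return make([]byte, 0, 512) }}

func handle() {
	buf := bufPool.Get().([]byte) // freshly allocated by New on every call here
	defer bufPool.Put(buf)        // no-op in this stub; a full Pool would recycle buf
	_ = append(buf, "payload"...)
}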


@@ -0,0 +1,168 @@
/*
* Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sync
// llgo:skipall
import (
gosync "sync"
"unsafe"
"github.com/goplus/llgo/c"
"github.com/goplus/llgo/c/pthread/sync"
)
// -----------------------------------------------------------------------------
type Mutex sync.Mutex
func (m *Mutex) Lock() {
if *(*c.Long)(unsafe.Pointer(m)) == 0 {
(*sync.Mutex)(m).Init(nil) // TODO(xsw): finalize
}
(*sync.Mutex)(m).Lock()
}
func (m *Mutex) TryLock() bool {
if *(*c.Long)(unsafe.Pointer(m)) == 0 {
(*sync.Mutex)(m).Init(nil)
}
return (*sync.Mutex)(m).TryLock() == 0
}
// llgo:link (*Mutex).Unlock C.pthread_mutex_unlock
func (m *Mutex) Unlock() {}
// -----------------------------------------------------------------------------
type RWMutex sync.RWLock
func (rw *RWMutex) RLock() {
if *(*c.Long)(unsafe.Pointer(rw)) == 0 {
(*sync.RWLock)(rw).Init(nil)
}
(*sync.RWLock)(rw).RLock()
}
func (rw *RWMutex) TryRLock() bool {
if *(*c.Long)(unsafe.Pointer(rw)) == 0 {
(*sync.RWLock)(rw).Init(nil)
}
return (*sync.RWLock)(rw).TryRLock() == 0
}
// llgo:link (*RWMutex).RUnlock C.pthread_rwlock_unlock
func (rw *RWMutex) RUnlock() {}
func (rw *RWMutex) Lock() {
if *(*c.Long)(unsafe.Pointer(rw)) == 0 {
(*sync.RWLock)(rw).Init(nil)
}
(*sync.RWLock)(rw).Lock()
}
func (rw *RWMutex) TryLock() bool {
if *(*c.Long)(unsafe.Pointer(rw)) == 0 {
(*sync.RWLock)(rw).Init(nil)
}
return (*sync.RWLock)(rw).TryLock() == 0
}
// llgo:link (*RWMutex).Unlock C.pthread_rwlock_unlock
func (rw *RWMutex) Unlock() {}
// -----------------------------------------------------------------------------
type Once struct {
m Mutex
done bool
}
func (o *Once) Do(f func()) {
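// NB: this fast-path read of o.done is unsynchronized; the
// double-checked pattern assumes such racy reads are acceptable
// here, unlike the standard library's atomic done flag.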
if !o.done {
o.m.Lock()
if !o.done {
o.done = true
f()
}
o.m.Unlock()
}
}
// -----------------------------------------------------------------------------
type Cond struct {
cond sync.Cond
m *sync.Mutex
}
func NewCond(l gosync.Locker) *Cond {
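// NB: the assertion below expects the pthread-level *sync.Mutex; this
// package's *Mutex satisfies gosync.Locker too, but would need an
// explicit (*sync.Mutex) conversion first, or the assertion panics.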
ret := &Cond{m: l.(*sync.Mutex)}
ret.cond.Init(nil) // TODO(xsw): finalize
return ret
}
// llgo:link (*Cond).Signal C.pthread_cond_signal
func (c *Cond) Signal() {}
// llgo:link (*Cond).Broadcast C.pthread_cond_broadcast
func (c *Cond) Broadcast() {}
func (c *Cond) Wait() {
c.cond.Wait(c.m)
}
// -----------------------------------------------------------------------------
type WaitGroup struct {
mutex sync.Mutex
cond sync.Cond
count int
}
func (wg *WaitGroup) doInit() {
wg.mutex.Init(nil)
wg.cond.Init(nil) // TODO(xsw): finalize
}
func (wg *WaitGroup) Add(delta int) {
if *(*c.Long)(unsafe.Pointer(wg)) == 0 {
wg.doInit()
}
wg.mutex.Lock()
wg.count += delta
if wg.count <= 0 {
wg.cond.Broadcast()
}
wg.mutex.Unlock()
}
func (wg *WaitGroup) Done() {
wg.Add(-1)
}
func (wg *WaitGroup) Wait() {
if *(*c.Long)(unsafe.Pointer(wg)) == 0 {
wg.doInit()
}
wg.mutex.Lock()
for wg.count > 0 {
wg.cond.Wait(&wg.mutex)
}
wg.mutex.Unlock()
}
// -----------------------------------------------------------------------------
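
The pthread-backed types above keep the standard library's surface, so a sketch written against the standard sync package exercises the same paths (mutual exclusion, one-time init, and the condition-variable wait inside WaitGroup):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		mu   sync.Mutex
		once sync.Once
		wg   sync.WaitGroup
		sum  int
	)
	for i := 1; i <= 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			once.Do(func() { fmt.Println("first worker running") })
			mu.Lock()
			sum += n
			mu.Unlock()
		}(i)
	}
	wg.Wait()        // blocks until the counter reaches zero
	fmt.Println(sum) // 10
}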