Revert "runtime: map; llgo/ssa: MapUpdate"

xushiwei authored on 2024-06-14 22:50:23 +08:00; committed by GitHub
parent 78b8455bba
commit 5e45e38481
26 changed files with 149 additions and 3623 deletions


@@ -59,12 +59,6 @@ import (
"github.com/goplus/llgo/internal/abi"
)
-type maptype = abi.MapType
-const (
-	goarchPtrSize = unsafe.Sizeof(uintptr(0))
-)
const (
// Maximum number of key/elem pairs a bucket can hold.
bucketCntBits = abi.MapBucketCountBits
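
For context on the hunk above: the removed goarchPtrSize constant and the upstream goarch.PtrSize that the revert restores compute the same value. A minimal standalone sketch (not part of the diff):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Same derivation as the removed goarchPtrSize constant:
	// prints 8 on 64-bit targets, 4 on 32-bit ones.
	fmt.Println(unsafe.Sizeof(uintptr(0)))
}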
@@ -80,9 +74,8 @@ const (
// Must fit in a uint8.
// Fast versions cannot handle big elems - the cutoff size for
// fast versions in cmd/compile/internal/gc/walk.go must be at most this elem.
-//
-// maxKeySize = abi.MapMaxKeyBytes
-// maxElemSize = abi.MapMaxElemBytes
+maxKeySize = abi.MapMaxKeyBytes
+maxElemSize = abi.MapMaxElemBytes
// data offset should be the size of the bmap struct, but needs to be
// aligned correctly. For amd64p32 this means 64-bit alignment
@@ -186,12 +179,11 @@ type hiter struct {
bucket uintptr
checkBucket uintptr
}
-*/
// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
// Masking the shift amount allows overflow checks to be elided.
-return uintptr(1) << (b & uint8(goarchPtrSize*8-1))
+return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}
// bucketMask returns 1<<b - 1, optimized for code generation.
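
Both bucketShift and bucketMask rely on masking the shift amount so the compiler can elide its overflow check. A runnable sketch of the trick, with ptrSize standing in for goarch.PtrSize:

package main

import "fmt"

const ptrSize = 4 << (^uintptr(0) >> 63) // 8 on 64-bit, 4 on 32-bit

// bucketShift returns 1<<b; masking b keeps the shift amount in range,
// so no runtime check on the shift is needed.
func bucketShift(b uint8) uintptr {
	return uintptr(1) << (b & (ptrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, the mask used to pick a bucket from a hash.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

func main() {
	fmt.Println(bucketShift(3)) // 8 buckets when B=3
	fmt.Println(bucketMask(3))  // 7: the low 3 bits of a hash pick the bucket
}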
@@ -201,7 +193,7 @@ func bucketMask(b uint8) uintptr {
// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
-top := uint8(hash >> (goarchPtrSize*8 - 8))
+top := uint8(hash >> (goarch.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
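
tophash keeps only the top 8 bits of the hash as a per-cell summary; bumping values below minTopHash frees the small values for cell states such as empty and evacuated. A sketch under the assumption that minTopHash is 5, as in the upstream runtime:

package main

import "fmt"

const (
	ptrSize    = 4 << (^uintptr(0) >> 63)
	minTopHash = 5 // values 0..4 are reserved for bucket cell states
)

func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash // never collide with the reserved sentinels
	}
	return top
}

func main() {
	// A hash with small high bits still maps to a valid tophash (>= 5).
	fmt.Println(tophash(0xCAFE))
}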
@@ -214,18 +206,16 @@ func evacuated(b *bmap) bool {
}
func (b *bmap) overflow(t *maptype) *bmap {
-return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarchPtrSize))
+return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
}
func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
-*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarchPtrSize)) = ovf
+*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
}
-/*
func (b *bmap) keys() unsafe.Pointer {
return add(unsafe.Pointer(b), dataOffset)
}
-*/
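
The overflow and setoverflow accessors above work because the overflow pointer occupies the last pointer-sized word of a bucket, addressable as base + BucketSize - PtrSize. A simplified, self-contained illustration with a stand-in bucket layout (not the real bmap):

package main

import (
	"fmt"
	"unsafe"
)

// bucket is a stand-in: 8 tophash bytes, then (in the real layout) keys
// and elems, with the overflow pointer as the final word.
type bucket struct {
	tophash  [8]uint8
	overflow *bucket
}

// setoverflow stores ovf in the last pointer-sized word, mirroring the
// pointer arithmetic in the diff above.
func setoverflow(b, ovf *bucket) {
	end := unsafe.Sizeof(*b) - unsafe.Sizeof(uintptr(0))
	*(**bucket)(unsafe.Pointer(uintptr(unsafe.Pointer(b)) + end)) = ovf
}

func main() {
	var b, ovf bucket
	setoverflow(&b, &ovf)
	fmt.Println(b.overflow == &ovf) // true: the last word is the overflow link
}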
// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
@@ -290,7 +280,6 @@ func (h *hmap) createOverflow() {
}
}
-/*
func makemap64(t *maptype, hint int64, h *hmap) *hmap {
if int64(int(hint)) != hint {
hint = 0
@@ -308,14 +297,15 @@ func makemap_small() *hmap {
return h
}
+/*
// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
-mem, overflow := mathMulUintptr(uintptr(hint), t.Bucket.Size_)
-if overflow || mem > bigAlloc {
+mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
+if overflow || mem > maxAlloc {
hint = 0
}
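
The restored math.MulUintptr call guards make(map[k]v, hint) against an allocation-size overflow before falling back to hint = 0. An equivalent overflow-checked multiply can be sketched with math/bits (mulUintptr here is a hypothetical stand-in for the runtime-internal helper):

package main

import (
	"fmt"
	"math/bits"
)

// mulUintptr returns a*b and whether the product overflowed uintptr.
func mulUintptr(a, b uintptr) (uintptr, bool) {
	hi, lo := bits.Mul(uint(a), uint(b))
	return uintptr(lo), hi != 0
}

func main() {
	const bucketSize = 176 // illustrative t.Bucket.Size_
	if _, overflow := mulUintptr(^uintptr(0)>>1, bucketSize); overflow {
		fmt.Println("hint too large, resetting to 0") // the makemap fallback path
	}
}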
@@ -399,7 +389,6 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
return buckets, nextOverflow
}
-/*
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
@@ -586,13 +575,24 @@ func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Point
}
return e, true
}
-*/
// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
+if raceenabled {
+	callerpc := getcallerpc()
+	pc := abi.FuncPCABIInternal(mapassign)
+	racewritepc(unsafe.Pointer(h), callerpc, pc)
+	raceReadObjectPC(t.Key, key, callerpc, pc)
+}
+if msanenabled {
+	msanread(key, t.Key.Size_)
+}
+if asanenabled {
+	asanread(key, t.Key.Size_)
+}
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
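
The nil-map check restored at the top of mapassign is what produces Go's familiar runtime failure; the race/msan/asan reads are instrumentation-only and do not change behavior. A minimal reproduction of the panic path:

package main

import "fmt"

func main() {
	defer func() {
		// Recovers the runtime error raised by mapassign's nil check.
		fmt.Println("recovered:", recover())
	}()
	var m map[string]int // nil map: reads work, writes panic
	_ = m["read"]        // reading a nil map just yields the zero value
	m["write"] = 1       // panics: assignment to entry in nil map
}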
@@ -694,7 +694,6 @@ done:
return elem
}
-/*
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if raceenabled && h != nil {
callerpc := getcallerpc()
@@ -1056,7 +1055,6 @@ func mapclear(t *maptype, h *hmap) {
}
h.flags &^= hashWriting
}
-*/
func hashGrow(t *maptype, h *hmap) {
// If we've hit the load factor, get bigger.
@@ -1307,7 +1305,6 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
}
}
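
hashGrow (above) doubles the bucket array once average bucket occupancy passes the load factor. A sketch of the trigger condition with constants assumed from the upstream runtime (bucketCnt = 8, load factor 13/2 = 6.5):

package main

import "fmt"

const (
	bucketCnt     = 8  // key/elem pairs per bucket
	loadFactorNum = 13 // 13/2 = 6.5 average entries per bucket
	loadFactorDen = 2
)

// overLoadFactor reports whether count entries spread over 1<<B buckets
// exceed the growth threshold, mirroring the upstream runtime check.
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*((uintptr(1)<<B)/loadFactorDen)
}

func main() {
	fmt.Println(overLoadFactor(14, 1)) // true: 14 entries in 2 buckets averages 7 > 6.5
	fmt.Println(overLoadFactor(14, 2)) // false: 14 in 4 buckets averages 3.5
}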
-/*
// Reflect stubs. Called from ../reflect/asm_*.s
//go:linkname reflect_makemap reflect.makemap