ssa: support runtime.map

internal/runtime/alg.go (new file, 83 lines)
@@ -0,0 +1,83 @@
package runtime

import (
	"unsafe"
)

func ptrequal(p, q unsafe.Pointer) bool {
	return p == q
}

func memequal0(p, q unsafe.Pointer) bool {
	return true
}

func memequal8(p, q unsafe.Pointer) bool {
	return *(*int8)(p) == *(*int8)(q)
}

func memequal16(p, q unsafe.Pointer) bool {
	return *(*int16)(p) == *(*int16)(q)
}

func memequal32(p, q unsafe.Pointer) bool {
	return *(*int32)(p) == *(*int32)(q)
}

func memequal64(p, q unsafe.Pointer) bool {
	return *(*int64)(p) == *(*int64)(q)
}

func memequal128(p, q unsafe.Pointer) bool {
	return *(*[2]int64)(p) == *(*[2]int64)(q)
}

func f32equal(p, q unsafe.Pointer) bool {
	return *(*float32)(p) == *(*float32)(q)
}

func f64equal(p, q unsafe.Pointer) bool {
	return *(*float64)(p) == *(*float64)(q)
}

func c64equal(p, q unsafe.Pointer) bool {
	return *(*complex64)(p) == *(*complex64)(q)
}

func c128equal(p, q unsafe.Pointer) bool {
	return *(*complex128)(p) == *(*complex128)(q)
}

func strequal(p, q unsafe.Pointer) bool {
	return *(*string)(p) == *(*string)(q)
}

func interequal(p, q unsafe.Pointer) bool {
	x := *(*iface)(p)
	y := *(*iface)(q)
	return x.tab == y.tab && ifaceeq(x.tab, x.data, y.data)
}

func nilinterequal(p, q unsafe.Pointer) bool {
	x := *(*eface)(p)
	y := *(*eface)(q)
	return x._type == y._type && efaceeq(x._type, x.data, y.data)
}

func efaceeq(t *_type, x, y unsafe.Pointer) bool {
	if t == nil {
		return true
	}
	eq := t.Equal
	if eq == nil {
		panic(errorString("comparing uncomparable type " + t.Str_).Error())
	}
	if isDirectIface(t) {
		// Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof.
		// Maps and funcs are not comparable, so they can't reach here.
		// Ptrs, chans, and single-element items can be compared directly using ==.
		return x == y
	}
	return eq(x, y)
}

func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
	if tab == nil {
		return true
	}
	t := tab._type
	eq := t.Equal
	if eq == nil {
		panic(errorString("comparing uncomparable type " + t.Str_).Error())
	}
	if isDirectIface(t) {
		// See comment in efaceeq.
		return x == y
	}
	return eq(x, y)
}
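
efaceeq and ifaceeq encode the language rule that interface values compare equal only when both dynamic type and value match, and that comparing values of an uncomparable dynamic type panics. A minimal standalone sketch of those semantics in plain Go (no llgo internals assumed):

package main

import "fmt"

func main() {
	var a, b interface{} = int32(7), int64(7)
	fmt.Println(a == b) // false: equal numbers, but different dynamic types

	var c, d interface{} = "go", "go"
	fmt.Println(c == d) // true: same type, values compared by the type's equal func

	defer func() { fmt.Println("recovered:", recover()) }()
	var e, f interface{} = []int{1}, []int{1}
	fmt.Println(e == f) // panics: comparing uncomparable type []int
}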

@@ -4,6 +4,14 @@

package runtime

+import (
+	"unsafe"
+
+	"github.com/goplus/llgo/internal/abi"
+	"github.com/goplus/llgo/internal/runtime/goarch"
+	"github.com/goplus/llgo/internal/runtime/math"
+)

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
@@ -53,12 +61,6 @@ package runtime
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.

-import (
-	"unsafe"
-
-	"github.com/goplus/llgo/internal/abi"
-)

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = abi.MapBucketCountBits
@@ -103,7 +105,7 @@ const (
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
-	// noCheck = 1<<(8*goarch.PtrSize) - 1
+	noCheck = 1<<(8*goarch.PtrSize) - 1
)

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
@@ -158,7 +160,6 @@ type bmap struct {
	// Followed by an overflow pointer.
}

/*
// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
// and reflect/value.go to match the layout of this structure.
@@ -286,7 +287,6 @@ func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	}
	return makemap(t, int(hint), h)
}
*/

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
@@ -297,7 +297,6 @@ func makemap_small() *hmap {
	return h
}

/*
// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
@@ -395,18 +394,18 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-	if raceenabled && h != nil {
-		callerpc := getcallerpc()
-		pc := abi.FuncPCABIInternal(mapaccess1)
-		racereadpc(unsafe.Pointer(h), callerpc, pc)
-		raceReadObjectPC(t.Key, key, callerpc, pc)
-	}
-	if msanenabled && h != nil {
-		msanread(key, t.Key.Size_)
-	}
-	if asanenabled && h != nil {
-		asanread(key, t.Key.Size_)
-	}
+	// if raceenabled && h != nil {
+	// 	callerpc := getcallerpc()
+	// 	pc := abi.FuncPCABIInternal(mapaccess1)
+	// 	racereadpc(unsafe.Pointer(h), callerpc, pc)
+	// 	raceReadObjectPC(t.Key, key, callerpc, pc)
+	// }
+	// if msanenabled && h != nil {
+	// 	msanread(key, t.Key.Size_)
+	// }
+	// if asanenabled && h != nil {
+	// 	asanread(key, t.Key.Size_)
+	// }
	if h == nil || h.count == 0 {
		if t.HashMightPanic() {
			t.Hasher(key, 0) // see issue 23734
@@ -443,7 +442,7 @@ bucketloop:
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
-			if t.Key.Equal(key, k) {
+			if mapKeyEqual(t, key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
@@ -456,18 +455,18 @@
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
-	if raceenabled && h != nil {
-		callerpc := getcallerpc()
-		pc := abi.FuncPCABIInternal(mapaccess2)
-		racereadpc(unsafe.Pointer(h), callerpc, pc)
-		raceReadObjectPC(t.Key, key, callerpc, pc)
-	}
-	if msanenabled && h != nil {
-		msanread(key, t.Key.Size_)
-	}
-	if asanenabled && h != nil {
-		asanread(key, t.Key.Size_)
-	}
+	// if raceenabled && h != nil {
+	// 	callerpc := getcallerpc()
+	// 	pc := abi.FuncPCABIInternal(mapaccess2)
+	// 	racereadpc(unsafe.Pointer(h), callerpc, pc)
+	// 	raceReadObjectPC(t.Key, key, callerpc, pc)
+	// }
+	// if msanenabled && h != nil {
+	// 	msanread(key, t.Key.Size_)
+	// }
+	// if asanenabled && h != nil {
+	// 	asanread(key, t.Key.Size_)
+	// }
	if h == nil || h.count == 0 {
		if t.HashMightPanic() {
			t.Hasher(key, 0) // see issue 23734
@@ -504,7 +503,7 @@ bucketloop:
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
-			if t.Key.Equal(key, k) {
+			if mapKeyEqual(t, key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
@@ -548,7 +547,7 @@ bucketloop:
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
-			if t.Key.Equal(key, k) {
+			if mapKeyEqual(t, key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
@@ -581,18 +580,19 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
-	if raceenabled {
-		callerpc := getcallerpc()
-		pc := abi.FuncPCABIInternal(mapassign)
-		racewritepc(unsafe.Pointer(h), callerpc, pc)
-		raceReadObjectPC(t.Key, key, callerpc, pc)
-	}
-	if msanenabled {
-		msanread(key, t.Key.Size_)
-	}
-	if asanenabled {
-		asanread(key, t.Key.Size_)
-	}
+
+	// if raceenabled {
+	// 	callerpc := getcallerpc()
+	// 	pc := abi.FuncPCABIInternal(mapassign)
+	// 	racewritepc(unsafe.Pointer(h), callerpc, pc)
+	// 	raceReadObjectPC(t.Key, key, callerpc, pc)
+	// }
+	// if msanenabled {
+	// 	msanread(key, t.Key.Size_)
+	// }
+	// if asanenabled {
+	// 	asanread(key, t.Key.Size_)
+	// }
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
@@ -635,7 +635,7 @@ bucketloop:
		if t.IndirectKey() {
			k = *((*unsafe.Pointer)(k))
		}
-		if !t.Key.Equal(key, k) {
+		if !mapKeyEqual(t, key, k) {
			continue
		}
		// already have a mapping for key. Update it.
@@ -674,12 +674,15 @@ bucketloop:
		kmem := newobject(t.Key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
		*(*unsafe.Pointer)(insertk) = key
	}
	if t.IndirectElem() {
		vmem := newobject(t.Elem)
		*(*unsafe.Pointer)(elem) = vmem
	}

	typedmemmove(t.Key, insertk, key)

	*inserti = top
	h.count++
@@ -695,18 +698,18 @@ done:
}

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
-	if raceenabled && h != nil {
-		callerpc := getcallerpc()
-		pc := abi.FuncPCABIInternal(mapdelete)
-		racewritepc(unsafe.Pointer(h), callerpc, pc)
-		raceReadObjectPC(t.Key, key, callerpc, pc)
-	}
-	if msanenabled && h != nil {
-		msanread(key, t.Key.Size_)
-	}
-	if asanenabled && h != nil {
-		asanread(key, t.Key.Size_)
-	}
+	// if raceenabled && h != nil {
+	// 	callerpc := getcallerpc()
+	// 	pc := abi.FuncPCABIInternal(mapdelete)
+	// 	racewritepc(unsafe.Pointer(h), callerpc, pc)
+	// 	raceReadObjectPC(t.Key, key, callerpc, pc)
+	// }
+	// if msanenabled && h != nil {
+	// 	msanread(key, t.Key.Size_)
+	// }
+	// if asanenabled && h != nil {
+	// 	asanread(key, t.Key.Size_)
+	// }
	if h == nil || h.count == 0 {
		if t.HashMightPanic() {
			t.Hasher(key, 0) // see issue 23734
@@ -744,7 +747,7 @@ search:
			if t.IndirectKey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
-			if !t.Key.Equal(key, k2) {
+			if !mapKeyEqual(t, key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
@@ -815,10 +818,10 @@
// by the compilers order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
-	if raceenabled && h != nil {
-		callerpc := getcallerpc()
-		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
-	}
+	// if raceenabled && h != nil {
+	// 	callerpc := getcallerpc()
+	// 	racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
+	// }

	it.t = t
	if h == nil || h.count == 0 {
@@ -859,7 +862,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
-		atomic.Or8(&h.flags, iterator|oldIterator)
+		atomicOr8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
@@ -867,10 +870,10 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {

func mapiternext(it *hiter) {
	h := it.h
-	if raceenabled {
-		callerpc := getcallerpc()
-		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
-	}
+	// if raceenabled {
+	// 	callerpc := getcallerpc()
+	// 	racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
+	// }
	if h.flags&hashWriting != 0 {
		fatal("concurrent map iteration and map write")
	}
@@ -932,7 +935,7 @@ next:
		// through the oldbucket, skipping any keys that will go
		// to the other new bucket (each oldbucket expands to two
		// buckets during a grow).
-		if t.ReflexiveKey() || t.Key.Equal(k, k) {
+		if t.ReflexiveKey() || mapKeyEqual(t, k, k) {
			// If the item in the oldbucket is not destined for
			// the current new bucket in the iteration, skip it.
			hash := t.Hasher(k, uintptr(h.hash0))
@@ -953,7 +956,7 @@ next:
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
-			!(t.ReflexiveKey() || t.Key.Equal(k, k)) {
+			!(t.ReflexiveKey() || mapKeyEqual(t, k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
@@ -993,11 +996,11 @@ next:

// mapclear deletes all keys from a map.
func mapclear(t *maptype, h *hmap) {
-	if raceenabled && h != nil {
-		callerpc := getcallerpc()
-		pc := abi.FuncPCABIInternal(mapclear)
-		racewritepc(unsafe.Pointer(h), callerpc, pc)
-	}
+	// if raceenabled && h != nil {
+	// 	callerpc := getcallerpc()
+	// 	pc := abi.FuncPCABIInternal(mapclear)
+	// 	racewritepc(unsafe.Pointer(h), callerpc, pc)
+	// }

	if h == nil || h.count == 0 {
		return
@@ -1211,7 +1214,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
				// Compute hash to make our evacuation decision (whether we need
				// to send this key/elem to bucket x or bucket y).
				hash := t.Hasher(k2, uintptr(h.hash0))
-				if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
+				if h.flags&iterator != 0 && !t.ReflexiveKey() && !mapKeyEqual(t, k2, k2) {
					// If key != key (NaNs), then the hash could be (and probably
					// will be) entirely different from the old hash. Moreover,
					// it isn't reproducible. Reproducibility is required in the
@@ -1307,6 +1310,7 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {

// Reflect stubs. Called from ../reflect/asm_*.s

/*
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflects math.
@@ -1413,10 +1417,10 @@ func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
-	if raceenabled {
-		callerpc := getcallerpc()
-		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
-	}
+	// if raceenabled {
+	// 	callerpc := getcallerpc()
+	// 	racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+	// }
	return h.count
}

@@ -1430,12 +1434,13 @@ func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
-	if raceenabled {
-		callerpc := getcallerpc()
-		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
-	}
+	// if raceenabled {
+	// 	callerpc := getcallerpc()
+	// 	racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+	// }
	return h.count
}
*/

const maxZero = 1024 // must match value in reflect/value.go:maxZero cmd/compile/internal/gc/walk.go:zeroValSize
var zeroVal [maxZero]byte
@@ -1445,7 +1450,7 @@ var zeroVal [maxZero]byte
// rewrite the relocation (from the package init func) from the outlined
// map init function to this symbol. Defined in assembly so as to avoid
// complications with instrumentation (coverage, etc).
-func mapinitnoop()
+//func mapinitnoop()

// mapclone for implementing maps.Clone
//
@@ -1723,4 +1728,3 @@ func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
		b = b.overflow(t)
	}
}
*/
@@ -13,3 +13,24 @@ func MulUintptr(a, b uintptr) (uintptr, bool) {
	overflow := b > MaxUintptr/a
	return a * b, overflow
}

+// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y
+// with the product bits' upper half returned in hi and the lower
+// half returned in lo.
+// This is a copy from math/bits.Mul64
+// On supported platforms this is an intrinsic lowered by the compiler.
+func Mul64(x, y uint64) (hi, lo uint64) {
+	const mask32 = 1<<32 - 1
+	x0 := x & mask32
+	x1 := x >> 32
+	y0 := y & mask32
+	y1 := y >> 32
+	w0 := x0 * y0
+	t := x1*y0 + w0>>32
+	w1 := t & mask32
+	w2 := t >> 32
+	w1 += x0 * y1
+	hi = x1*y1 + w2 + w1>>32
+	lo = x * y
+	return
+}
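
Since the comment says this Mul64 is a copy of math/bits.Mul64, the limb arithmetic can be sanity-checked against the stdlib intrinsic. A standalone check (mul64 below is a local reproduction for testing, not the committed function):

package main

import (
	"fmt"
	"math/bits"
)

// mul64 reproduces the 32-bit-limb schoolbook multiply added above.
func mul64(x, y uint64) (hi, lo uint64) {
	const mask32 = 1<<32 - 1
	x0, x1 := x&mask32, x>>32
	y0, y1 := y&mask32, y>>32
	w0 := x0 * y0
	t := x1*y0 + w0>>32
	w1 := t&mask32 + x0*y1
	hi = x1*y1 + t>>32 + w1>>32
	lo = x * y
	return
}

func main() {
	x, y := uint64(0xdeadbeefcafebabe), uint64(0x0123456789abcdef)
	hi, lo := mul64(x, y)
	bhi, blo := bits.Mul64(x, y)
	fmt.Println(hi == bhi, lo == blo) // true true
}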

@@ -4,11 +4,23 @@

package runtime

-import _ "unsafe"
+import (
+	"unsafe"
+
+	"github.com/goplus/llgo/c/sync/atomic"
+	"github.com/goplus/llgo/internal/runtime/math"
+)

+//go:linkname fastrand C.rand
+func fastrand() uint32

+func fastrand64() uint64 {
+	n := uint64(fastrand())
+	n += 0xa0761d6478bd642f
+	hi, lo := math.Mul64(n, n^0xe7037ed1a0b428db)
+	return hi ^ lo
+}
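
fastrand64 widens the 32-bit C.rand-backed fastrand and mixes it with a wyhash-style 128-bit multiply (the two 64-bit constants are the wyhash constants). A standalone sketch of the same construction, with math/rand.Uint32 standing in for fastrand and math/bits.Mul64 for the copied Mul64:

package main

import (
	"fmt"
	"math/bits"
	"math/rand"
)

// fastrand64 mixes a weak 32-bit source into a full 64-bit value.
func fastrand64() uint64 {
	n := uint64(rand.Uint32())
	n += 0xa0761d6478bd642f
	hi, lo := bits.Mul64(n, n^0xe7037ed1a0b428db)
	return hi ^ lo
}

func main() {
	fmt.Printf("%#016x\n", fastrand64())
}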

/* TODO(xsw):
func fastrand() uint32 {
	mp := getg().m
@@ -37,9 +49,74 @@ func fastrand() uint32 {
}
*/

//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

+// implementation of new builtin
+// compiler (both frontend and SSA backend) knows the signature
+// of this function.
+func newobject(typ *_type) unsafe.Pointer {
+	return AllocZ(typ.Size_)
+}

+// TODO
+func roundupsize(size uintptr) uintptr {
+	// if size < _MaxSmallSize {
+	// 	if size <= smallSizeMax-8 {
+	// 		return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])
+	// 	} else {
+	// 		return uintptr(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]])
+	// 	}
+	// }
+	// if size+_PageSize < size {
+	// 	return size
+	// }
+	// return alignUp(size, _PageSize)
+	return size
+}

+// newarray allocates an array of n elements of type typ.
+func newarray(typ *_type, n int) unsafe.Pointer {
+	if n == 1 {
+		return AllocZ(typ.Size_)
+	}
+	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
+	if overflow || mem > maxAlloc || n < 0 {
+		panic(plainError("runtime: allocation size out of range"))
+	}
+	return AllocZ(mem)
+}
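
newarray relies on math.MulUintptr to reject element counts whose byte size would wrap around. A hedged standalone sketch of that guard; the small-operand fast path below is an assumption modeled on the upstream Go implementation, since the hunk above shows only the last lines of MulUintptr:

package main

import (
	"fmt"
	"math/bits"
)

// mulUintptr mirrors the overflow check newarray depends on. The first
// branch (skip the division when both operands are small) is assumed
// from upstream Go, not shown in this commit.
func mulUintptr(a, b uintptr) (uintptr, bool) {
	if a|b < 1<<(4*bits.UintSize/8) || a == 0 {
		return a * b, false
	}
	const maxUintptr = ^uintptr(0)
	overflow := b > maxUintptr/a
	return a * b, overflow
}

func main() {
	fmt.Println(mulUintptr(8, 10))        // 80 false: ordinary allocation
	fmt.Println(mulUintptr(1<<40, 1<<40)) // wrapped product, true on 64-bit
}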

+const (
+	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
+	_64bit       = 1 << (^uintptr(0) >> 63) / 2
+	heapAddrBits = (_64bit)*48 + (1-_64bit)*(32)
+	maxAlloc     = (1 << heapAddrBits) - (1-_64bit)*1
+)

+func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
+	// bulkBarrierPreWrite(uintptr(ptr), 0, n)
+	// memclrNoHeapPointers(ptr, n)
+}

+func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
+}

+func fatal(s string) {
+	print("fatal error: ", s, "\n")
+}

+func throw(s string) {
+	print("fatal error: ", s, "\n")
+}

+func atomicOr8(ptr *uint8, v uint8) uint8 {
+	return (uint8)(atomic.Or((*uint)(unsafe.Pointer(ptr)), uint(v)))
+}

+func noescape(p unsafe.Pointer) unsafe.Pointer {
+	x := uintptr(p)
+	return unsafe.Pointer(x ^ 0)
+}
@@ -31,6 +31,12 @@ func (e errorString) Error() string {
	return "runtime error: " + string(e)
}

+type plainError string

+func (e plainError) Error() string {
+	return string(e)
+}

func AssertRuntimeError(b bool, msg string) {
	if b {
		panic(errorString(msg).Error())
@@ -16,10 +16,69 @@

package runtime

import (
	"unsafe"

+	"github.com/goplus/llgo/internal/abi"
)

// Map represents a Go map.
type Map = hmap
+type maptype = abi.MapType

+type slice struct {
+	array unsafe.Pointer
+	len   int
+	cap   int
+}

+func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
+	Typedmemmove(typ, dst, src)
+}

+// MakeSmallMap creates a new small map.
+func MakeSmallMap() *Map {
+	return makemap_small()
+}

+func MakeMap(t *maptype, hint int) *hmap {
+	return makemap(t, hint, nil)
+}

+func MapAssign(t *maptype, h *Map, key unsafe.Pointer) unsafe.Pointer {
+	return mapassign(t, h, key)
+}

+func MapAccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+	return mapaccess1(t, h, key)
+}

+func MapAccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
+	return mapaccess2(t, h, key)
+}
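
MakeSmallMap, MakeMap, MapAssign, and MapAccess1/2 form the surface the SSA backend calls into. A hedged illustration of the intended lowering, written as ordinary Go with the presumed runtime call for each map operation noted in comments (an illustrative mapping, not actual generated code):

package main

import "fmt"

func main() {
	m := make(map[string]int) // no/small hint -> MakeSmallMap() or MakeMap(t, hint)
	m["a"] = 1                // p := MapAssign(t, m, &k); store 1 through p
	v, ok := m["a"]           // p, ok := MapAccess2(t, m, &k); load v through p
	fmt.Println(v, ok)        // 1 true
	_ = m["b"]                // p := MapAccess1(t, m, &k); missing key yields a pointer to a zero value
}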

+func mapKeyEqual(t *maptype, p, q unsafe.Pointer) bool {
+	if isDirectIface(t.Key) {
+		switch t.Key.Size_ {
+		case 0:
+			return true
+		case 1:
+			return memequal8(p, q)
+		case 2:
+			return memequal16(p, q)
+		case 4:
+			return memequal32(p, q)
+		case 8:
+			return memequal64(p, q)
+		}
+	}
+	switch t.Key.Kind() {
+	case abi.String:
+		return strequal(p, q)
+	case abi.Complex64:
+		return c64equal(p, q)
+	case abi.Complex128:
+		return c128equal(p, q)
+	}
+	return t.Key.Equal(p, q)
+}
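
mapKeyEqual routes small direct keys to the memequal fast paths and string/complex keys to their helpers, falling back to t.Key.Equal. That fallback is why the hunks above keep the t.ReflexiveKey() guards: a float NaN key is never equal to itself, so such entries can never be matched again. A runnable illustration of that behavior with an ordinary Go map:

package main

import (
	"fmt"
	"math"
)

func main() {
	m := map[float64]int{}
	nan := math.NaN()
	m[nan] = 1
	m[nan] = 2          // NaN != NaN, so this inserts a second entry
	fmt.Println(len(m)) // 2
	_, ok := m[nan]
	fmt.Println(ok) // false: a lookup can never match a NaN key
}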

@@ -40,6 +40,9 @@ func Basic(kind Kind) *Type {
			FieldAlign_: uint8(align),
			Kind_:       uint8(kind),
			Str_:        name,
+			Equal: func(a, b unsafe.Pointer) bool {
+				return uintptr(a) == uintptr(b)
+			},
		}
	}
	return tyBasic[kind]
@@ -208,4 +211,28 @@ func ChanOf(dir int, strChan string, elem *Type) *Type {
	return &ret.Type
}

+func MapOf(key, elem *Type, bucket *Type, flags int) *Type {
+	ret := &abi.MapType{
+		Type: Type{
+			Size_:       unsafe.Sizeof(uintptr(0)),
+			Hash:        uint32(abi.Map),
+			Align_:      pointerAlign,
+			FieldAlign_: pointerAlign,
+			Kind_:       uint8(abi.Map),
+			Str_:        "map[" + key.String() + "]" + elem.String(),
+		},
+		Key:        key,
+		Elem:       elem,
+		Bucket:     bucket,
+		KeySize:    uint8(key.Size_),
+		ValueSize:  uint8(elem.Size_),
+		BucketSize: uint16(bucket.Size_),
+		Flags:      uint32(flags),
+	}
+	ret.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
+		return uintptr(p)
+	}
+	return &ret.Type
+}

// -----------------------------------------------------------------------------