reflect: implement map

visualfc
2024-11-26 20:45:01 +08:00
parent e93d57983e
commit cd3a4bb8c8
2 changed files with 480 additions and 288 deletions

View File

@@ -27,6 +27,7 @@ import (
"github.com/goplus/llgo/internal/abi" "github.com/goplus/llgo/internal/abi"
"github.com/goplus/llgo/internal/lib/sync" "github.com/goplus/llgo/internal/lib/sync"
"github.com/goplus/llgo/internal/runtime" "github.com/goplus/llgo/internal/runtime"
"github.com/goplus/llgo/internal/runtime/goarch"
)
// Type is the representation of a Go type.
@@ -1466,3 +1467,171 @@ func funcStr(ft *funcType) string {
}
return string(repr)
}
// isReflexive reports whether the == operation on the type is reflexive.
// That is, x == x for all values x of type t.
func isReflexive(t *abi.Type) bool {
switch Kind(t.Kind()) {
case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
return true
case Float32, Float64, Complex64, Complex128, Interface:
return false
case Array:
tt := (*arrayType)(unsafe.Pointer(t))
return isReflexive(tt.Elem)
case Struct:
tt := (*structType)(unsafe.Pointer(t))
for _, f := range tt.Fields {
if !isReflexive(f.Typ) {
return false
}
}
return true
default:
// Func, Map, Slice, Invalid
panic("isReflexive called on non-key type " + stringFor(t))
}
}
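Aside, not part of the patch: a minimal sketch of why floating-point and interface keys are non-reflexive. NaN != NaN, so each insertion under a NaN key creates an entry that no later lookup can ever match, which is exactly the case the map code has to tolerate:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(nan == nan) // false: == is not reflexive for NaN

	m := map[float64]int{}
	m[math.NaN()] = 1
	m[math.NaN()] = 2
	fmt.Println(len(m))                // 2: each NaN key is a new, unreachable entry
	fmt.Println(m[math.NaN()], m[nan]) // 0 0: lookups by NaN never match
}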
// needKeyUpdate reports whether map overwrites require the key to be copied.
func needKeyUpdate(t *abi.Type) bool {
switch Kind(t.Kind()) {
case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer:
return false
case Float32, Float64, Complex64, Complex128, Interface, String:
// Float keys can be updated from +0 to -0.
// String keys can be updated to use a smaller backing store.
// Interfaces might have floats or strings in them.
return true
case Array:
tt := (*arrayType)(unsafe.Pointer(t))
return needKeyUpdate(tt.Elem)
case Struct:
tt := (*structType)(unsafe.Pointer(t))
for _, f := range tt.Fields {
if needKeyUpdate(f.Typ) {
return true
}
}
return false
default:
// Func, Map, Slice, Invalid
panic("needKeyUpdate called on non-key type " + stringFor(t))
}
}
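Aside, not part of the patch: the +0/-0 case this flag exists for. Assuming the usual behavior of updating the stored key on overwrite (as the gc runtime does when needKeyUpdate is true), the surviving key takes the sign of the last assignment:

package main

import (
	"fmt"
	"math"
)

func main() {
	negZero := math.Copysign(0, -1)
	m := map[float64]string{}
	m[negZero] = "first"
	m[0.0] = "second" // +0 == -0, so this overwrites the same entry

	fmt.Println(len(m)) // 1
	for k, v := range m {
		// With a runtime that updates keys on overwrite, the stored key
		// is now +0 even though the entry was created with -0.
		fmt.Println(math.Signbit(k), v) // false second (observed with gc)
	}
}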
// hashMightPanic reports whether the hash of a map key of type t might panic.
func hashMightPanic(t *abi.Type) bool {
switch Kind(t.Kind()) {
case Interface:
return true
case Array:
tt := (*arrayType)(unsafe.Pointer(t))
return hashMightPanic(tt.Elem)
case Struct:
tt := (*structType)(unsafe.Pointer(t))
for _, f := range tt.Fields {
if hashMightPanic(f.Typ) {
return true
}
}
return false
default:
return false
}
}
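Aside, not part of the patch: interface keys hash their dynamic value, so inserting a key whose dynamic type is unhashable panics at runtime, which is what hashMightPanic has to account for:

package main

import "fmt"

func main() {
	m := map[any]int{}
	m["ok"] = 1 // fine: string is hashable

	defer func() {
		// Hashing the key panics because the dynamic type is a slice.
		fmt.Println("recovered:", recover()) // runtime error: hash of unhashable type []int
	}()
	m[[]int{1, 2}] = 2
}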
// Make sure these routines stay in sync with ../runtime/map.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
bucketSize uintptr = abi.MapBucketCount
maxKeySize uintptr = abi.MapMaxKeyBytes
maxValSize uintptr = abi.MapMaxElemBytes
)
func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
if ktyp.Size_ > maxKeySize {
ktyp = ptrTo(ktyp)
}
if etyp.Size_ > maxValSize {
etyp = ptrTo(etyp)
}
// Prepare GC data if any.
// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
// or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
// Note that since the key and value are known to be <= 128 bytes,
// they're guaranteed to have bitmaps instead of GC programs.
var gcdata *byte
var ptrdata uintptr
size := bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
panic("reflect: bad size computation in MapOf")
}
if ktyp.PtrBytes != 0 || etyp.PtrBytes != 0 {
nptr := (bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
n := (nptr + 7) / 8
// Runtime needs pointer masks to be a multiple of uintptr in size.
n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
mask := make([]byte, n)
base := bucketSize / goarch.PtrSize
if ktyp.PtrBytes != 0 {
emitGCMask(mask, base, ktyp, bucketSize)
}
base += bucketSize * ktyp.Size_ / goarch.PtrSize
if etyp.PtrBytes != 0 {
emitGCMask(mask, base, etyp, bucketSize)
}
base += bucketSize * etyp.Size_ / goarch.PtrSize
word := base
mask[word/8] |= 1 << (word % 8)
gcdata = &mask[0]
ptrdata = (word + 1) * goarch.PtrSize
// overflow word must be last
if ptrdata != size {
panic("reflect: bad layout computation in MapOf")
}
}
b := &abi.Type{
Align_: goarch.PtrSize,
Size_: size,
Kind_: uint8(Struct),
PtrBytes: ptrdata,
GCData: gcdata,
}
b.Str_ = "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
return b
}
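Aside, not part of the patch: a worked instance of the arithmetic above, assuming a 64-bit target (goarch.PtrSize == 8) and a map[string]int bucket, that is, a 16-byte key whose first word is a pointer and an 8-byte pointer-free element:

package main

import "fmt"

func main() {
	const (
		ptrSize    = 8  // goarch.PtrSize on a 64-bit target (assumption)
		bucketSize = 8  // abi.MapBucketCount
		keySize    = 16 // string header: data pointer + length
		elemSize   = 8  // int, no pointers
	)

	// One bucket: 8 tophash bytes, 8 keys, 8 elems, then 1 overflow pointer.
	size := bucketSize*(1+keySize+elemSize) + ptrSize
	nptr := size / ptrSize                                 // pointer-size words in the bucket
	maskBytes := (nptr + 7) / 8                            // bytes of pointer bitmap...
	maskBytes = (maskBytes + ptrSize - 1) &^ (ptrSize - 1) // ...rounded up to a multiple of ptrSize
	overflowWord := (bucketSize + bucketSize*keySize + bucketSize*elemSize) / ptrSize

	fmt.Println(size)                                   // 208
	fmt.Println(nptr, maskBytes)                        // 26 8
	fmt.Println(overflowWord, (overflowWord+1)*ptrSize) // 25 208: ptrdata == size, overflow word is last
}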
func (t *rtype) gcSlice(begin, end uintptr) []byte {
return (*[1 << 30]byte)(unsafe.Pointer(t.t.GCData))[begin:end:end]
}
// emitGCMask writes the GC mask for [n]typ into out, starting at bit
// offset base.
func emitGCMask(out []byte, base uintptr, typ *abi.Type, n uintptr) {
// if typ.Kind_&kindGCProg != 0 {
// panic("reflect: unexpected GC program")
// }
// ptrs := typ.PtrBytes / goarch.PtrSize
// words := typ.Size_ / goarch.PtrSize
// mask := typ.GcSlice(0, (ptrs+7)/8)
// for j := uintptr(0); j < ptrs; j++ {
// if (mask[j/8]>>(j%8))&1 != 0 {
// for i := uintptr(0); i < n; i++ {
// k := base + i*words + j
// out[k/8] |= 1 << (k % 8)
// }
// }
// }
}

View File

@@ -1251,29 +1251,28 @@ func (v Value) SetCap(n int) {
// As in Go, key's elem must be assignable to the map's key type,
// and elem's value must be assignable to the map's elem type.
func (v Value) SetMapIndex(key, elem Value) {
/* TODO(xsw):
v.mustBe(Map)
v.mustBeExported()
key.mustBeExported()
tt := (*mapType)(unsafe.Pointer(v.typ()))
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= maxValSize {
k := *(*string)(key.ptr)
if elem.typ() == nil {
mapdelete_faststr(v.typ(), v.pointer(), k)
return
}
elem.mustBeExported()
elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
var e unsafe.Pointer
if elem.flag&flagIndir != 0 {
e = elem.ptr
} else {
e = unsafe.Pointer(&elem.ptr)
}
mapassign_faststr(v.typ(), v.pointer(), k, e)
return
}
// if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= maxValSize {
// k := *(*string)(key.ptr)
// if elem.typ() == nil {
// mapdelete_faststr(v.typ(), v.pointer(), k)
// return
// }
// elem.mustBeExported()
// elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
// var e unsafe.Pointer
// if elem.flag&flagIndir != 0 {
// e = elem.ptr
// } else {
// e = unsafe.Pointer(&elem.ptr)
// }
// mapassign_faststr(v.typ(), v.pointer(), k, e)
// return
// }
key = key.assignTo("reflect.Value.SetMapIndex", tt.Key, nil)
var k unsafe.Pointer
@@ -1295,8 +1294,6 @@ func (v Value) SetMapIndex(key, elem Value) {
e = unsafe.Pointer(&elem.ptr)
}
mapassign(v.typ(), v.pointer(), k, e)
*/
panic("todo: reflect.Value.SetMapIndex")
}
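Aside, not part of the patch: the standard SetMapIndex semantics that this now-enabled path implements, assigning through a reflect.Value and deleting by passing the zero Value as elem:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{}
	v := reflect.ValueOf(m)

	// Assign m["a"] = 1 through reflection.
	v.SetMapIndex(reflect.ValueOf("a"), reflect.ValueOf(1))

	// Passing the zero Value as elem deletes the key.
	v.SetMapIndex(reflect.ValueOf("a"), reflect.Value{})

	fmt.Println(len(m)) // 0
}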
// SetUint sets v's underlying value to x.
@@ -2270,296 +2267,277 @@ func (v Value) call(op string, in []Value) (out []Value) {
return
}
// var callGC bool // for testing; see TestCallMethodJump and TestCallArgLive
// const debugReflectCall = false
var stringType = rtypeOf("")
// MapIndex returns the value associated with key in the map v.
// It panics if v's Kind is not Map.
// It returns the zero Value if key is not found in the map or if v represents a nil map.
// As in Go, the key's value must be assignable to the map's key type.
func (v Value) MapIndex(key Value) Value {
v.mustBe(Map)
tt := (*mapType)(unsafe.Pointer(v.typ()))
// Do not require key to be exported, so that DeepEqual
// and other programs can use all the keys returned by
// MapKeys as arguments to MapIndex. If either the map
// or the key is unexported, though, the result will be
// considered unexported. This is consistent with the
// behavior for structs, which allow read but not write
// of unexported fields.
// func (v Value) call(op string, in []Value) []Value {
// // Get function pointer, type.
// t := (*funcType)(unsafe.Pointer(v.typ()))
// var (
// fn unsafe.Pointer
// rcvr Value
// rcvrtype *abi.Type
// )
// if v.flag&flagMethod != 0 {
// rcvr = v
// rcvrtype, t, fn = methodReceiver(op, v, int(v.flag)>>flagMethodShift)
// } else if v.flag&flagIndir != 0 {
// fn = *(*unsafe.Pointer)(v.ptr)
// } else {
// fn = v.ptr
// }
// if fn == nil {
// panic("reflect.Value.Call: call of nil function")
// }
var e unsafe.Pointer
// if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= maxValSize {
// k := *(*string)(key.ptr)
// e = mapaccess_faststr(v.typ(), v.pointer(), k)
// } else {
key = key.assignTo("reflect.Value.MapIndex", tt.Key, nil)
var k unsafe.Pointer
if key.flag&flagIndir != 0 {
k = key.ptr
} else {
k = unsafe.Pointer(&key.ptr)
}
e = mapaccess(v.typ(), v.pointer(), k)
// }
if e == nil {
return Value{}
}
typ := tt.Elem
fl := (v.flag | key.flag).ro()
fl |= flag(typ.Kind())
return copyVal(typ, fl, e)
}
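Aside, not part of the patch: typical MapIndex usage; a missing key yields the invalid zero Value rather than a zero of the element type:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	v := reflect.ValueOf(map[string]int{"a": 1})

	if e := v.MapIndex(reflect.ValueOf("a")); e.IsValid() {
		fmt.Println(e.Int()) // 1
	}

	// A missing key yields the zero Value, not a zero of the elem type.
	fmt.Println(v.MapIndex(reflect.ValueOf("b")).IsValid()) // false
}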
// MapKeys returns a slice containing all the keys present in the map,
// in unspecified order.
// It panics if v's Kind is not Map.
// It returns an empty slice if v represents a nil map.
func (v Value) MapKeys() []Value {
v.mustBe(Map)
tt := (*mapType)(unsafe.Pointer(v.typ()))
keyType := tt.Key
// isSlice := op == "CallSlice"
// n := t.NumIn()
// isVariadic := t.IsVariadic()
// if isSlice {
// if !isVariadic {
// panic("reflect: CallSlice of non-variadic function")
// }
// if len(in) < n {
// panic("reflect: CallSlice with too few input arguments")
// }
// if len(in) > n {
// panic("reflect: CallSlice with too many input arguments")
// }
// } else {
// if isVariadic {
// n--
// }
// if len(in) < n {
// panic("reflect: Call with too few input arguments")
// }
// if !isVariadic && len(in) > n {
// panic("reflect: Call with too many input arguments")
// }
// }
// for _, x := range in {
// if x.Kind() == Invalid {
// panic("reflect: " + op + " using zero Value argument")
// }
// }
// for i := 0; i < n; i++ {
// if xt, targ := in[i].Type(), t.In(i); !xt.AssignableTo(toRType(targ)) {
// panic("reflect: " + op + " using " + xt.String() + " as type " + stringFor(targ))
// }
// }
// if !isSlice && isVariadic {
// // prepare slice for remaining values
// m := len(in) - n
// slice := MakeSlice(toRType(t.In(n)), m, m)
// elem := toRType(t.In(n)).Elem() // FIXME cast to slice type and Elem()
// for i := 0; i < m; i++ {
// x := in[n+i]
// if xt := x.Type(); !xt.AssignableTo(elem) {
// panic("reflect: cannot use " + xt.String() + " as type " + elem.String() + " in " + op)
// }
// slice.Index(i).Set(x)
// }
// origIn := in
// in = make([]Value, n+1)
// copy(in[:n], origIn)
// in[n] = slice
// }
// nin := len(in)
// if nin != t.NumIn() {
// panic("reflect.Value.Call: wrong argument count")
// }
// nout := t.NumOut()
// // Register argument space.
// var regArgs abi.RegArgs
fl := v.flag.ro() | flag(keyType.Kind())
m := v.pointer()
mlen := int(0)
if m != nil {
mlen = maplen(m)
}
var it hiter
mapiterinit(v.typ(), m, &it)
a := make([]Value, mlen)
var i int
for i = 0; i < len(a); i++ {
key := mapiterkey(&it)
if key == nil {
// Someone deleted an entry from the map since we
// called maplen above. It's a data race, but nothing
// we can do about it.
break
}
a[i] = copyVal(keyType, fl, key)
mapiternext(&it)
}
return a[:i]
}
// // Compute frame type.
// frametype, framePool, abid := funcLayout(t, rcvrtype)
// hiter's structure matches runtime.hiter's structure.
// Having a clone here allows us to embed a map iterator
// inside type MapIter so that MapIters can be re-used
// without doing any allocations.
type hiter struct {
key unsafe.Pointer
elem unsafe.Pointer
t unsafe.Pointer
h unsafe.Pointer
buckets unsafe.Pointer
bptr unsafe.Pointer
overflow *[]unsafe.Pointer
oldoverflow *[]unsafe.Pointer
startBucket uintptr
offset uint8
wrapped bool
B uint8
i uint8
bucket uintptr
checkBucket uintptr
}
func (h *hiter) initialized() bool {
return h.t != nil
}
// // Allocate a chunk of memory for frame if needed.
// var stackArgs unsafe.Pointer
// if frametype.Size() != 0 {
// if nout == 0 {
// stackArgs = framePool.Get().(unsafe.Pointer)
// } else {
// // Can't use pool if the function has return values.
// // We will leak pointer to args in ret, so its lifetime is not scoped.
// stackArgs = unsafe_New(frametype)
// }
// }
// frameSize := frametype.Size()
// if debugReflectCall {
// println("reflect.call", stringFor(&t.Type))
// abid.dump()
// }
// A MapIter is an iterator for ranging over a map.
// See Value.MapRange.
type MapIter struct {
m Value
hiter hiter
}
// // Copy inputs into args.
// Key returns the key of iter's current map entry.
func (iter *MapIter) Key() Value {
if !iter.hiter.initialized() {
panic("MapIter.Key called before Next")
}
iterkey := mapiterkey(&iter.hiter)
if iterkey == nil {
panic("MapIter.Key called on exhausted iterator")
}
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
}
// // Handle receiver.
// inStart := 0
// if rcvrtype != nil {
// // Guaranteed to only be one word in size,
// // so it will only take up exactly 1 abiStep (either
// // in a register or on the stack).
// switch st := abid.call.steps[0]; st.kind {
// case abiStepStack:
// storeRcvr(rcvr, stackArgs)
// case abiStepPointer:
// storeRcvr(rcvr, unsafe.Pointer(&regArgs.Ptrs[st.ireg]))
// fallthrough
// case abiStepIntReg:
// storeRcvr(rcvr, unsafe.Pointer(&regArgs.Ints[st.ireg]))
// case abiStepFloatReg:
// storeRcvr(rcvr, unsafe.Pointer(&regArgs.Floats[st.freg]))
// default:
// panic("unknown ABI parameter kind")
// }
// inStart = 1
// }
// SetIterKey assigns to v the key of iter's current map entry.
// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value.
// As in Go, the key must be assignable to v's type and
// must not be derived from an unexported field.
func (v Value) SetIterKey(iter *MapIter) {
if !iter.hiter.initialized() {
panic("reflect: Value.SetIterKey called before Next")
}
iterkey := mapiterkey(&iter.hiter)
if iterkey == nil {
panic("reflect: Value.SetIterKey called on exhausted iterator")
}
// // Handle arguments.
// for i, v := range in {
// v.mustBeExported()
// targ := toRType(t.In(i))
// // TODO(mknyszek): Figure out if it's possible to get some
// // scratch space for this assignment check. Previously, it
// // was possible to use space in the argument frame.
// v = v.assignTo("reflect.Value.Call", &targ.t, nil)
// stepsLoop:
// for _, st := range abid.call.stepsForValue(i + inStart) {
// switch st.kind {
// case abiStepStack:
// // Copy values to the "stack."
// addr := add(stackArgs, st.stkOff, "precomputed stack arg offset")
// if v.flag&flagIndir != 0 {
// typedmemmove(&targ.t, addr, v.ptr)
// } else {
// *(*unsafe.Pointer)(addr) = v.ptr
// }
// // There's only one step for a stack-allocated value.
// break stepsLoop
// case abiStepIntReg, abiStepPointer:
// // Copy values to "integer registers."
// if v.flag&flagIndir != 0 {
// offset := add(v.ptr, st.offset, "precomputed value offset")
// if st.kind == abiStepPointer {
// // Duplicate this pointer in the pointer area of the
// // register space. Otherwise, there's the potential for
// // this to be the last reference to v.ptr.
// regArgs.Ptrs[st.ireg] = *(*unsafe.Pointer)(offset)
// }
// intToReg(&regArgs, st.ireg, st.size, offset)
// } else {
// if st.kind == abiStepPointer {
// // See the comment in abiStepPointer case above.
// regArgs.Ptrs[st.ireg] = v.ptr
// }
// regArgs.Ints[st.ireg] = uintptr(v.ptr)
// }
// case abiStepFloatReg:
// // Copy values to "float registers."
// if v.flag&flagIndir == 0 {
// panic("attempted to copy pointer to FP register")
// }
// offset := add(v.ptr, st.offset, "precomputed value offset")
// floatToReg(&regArgs, st.freg, st.size, offset)
// default:
// panic("unknown ABI part kind")
// }
// }
// }
// // TODO(mknyszek): Remove this when we no longer have
// // caller reserved spill space.
// frameSize = align(frameSize, goarch.PtrSize)
// frameSize += abid.spill
// // Mark pointers in registers for the return path.
// regArgs.ReturnIsPtr = abid.outRegPtrs
// if debugReflectCall {
// regArgs.Dump()
// }
// // For testing; see TestCallArgLive.
// if callGC {
// runtime.GC()
// }
v.mustBeAssignable()
var target unsafe.Pointer
if v.kind() == Interface {
target = v.ptr
}
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
iter.m.mustBeExported() // do not let unexported m leak
key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir}
key = key.assignTo("reflect.MapIter.SetKey", v.typ(), target)
typedmemmove(v.typ(), v.ptr, key.ptr)
}
// // Call.
// call(frametype, fn, stackArgs, uint32(frametype.Size()), uint32(abid.retOffset), uint32(frameSize), &regArgs)
// Value returns the value of iter's current map entry.
func (iter *MapIter) Value() Value {
if !iter.hiter.initialized() {
panic("MapIter.Value called before Next")
}
iterelem := mapiterelem(&iter.hiter)
if iterelem == nil {
panic("MapIter.Value called on exhausted iterator")
}
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
}
// SetIterValue assigns to v the value of iter's current map entry.
// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value.
// As in Go, the value must be assignable to v's type and
// must not be derived from an unexported field.
func (v Value) SetIterValue(iter *MapIter) {
if !iter.hiter.initialized() {
panic("reflect: Value.SetIterValue called before Next")
}
iterelem := mapiterelem(&iter.hiter)
if iterelem == nil {
panic("reflect: Value.SetIterValue called on exhausted iterator")
}
v.mustBeAssignable()
var target unsafe.Pointer
if v.kind() == Interface {
target = v.ptr
}
// // For testing; see TestCallMethodJump.
// if callGC {
// runtime.GC()
// }
// var ret []Value
// if nout == 0 {
// if stackArgs != nil {
// typedmemclr(frametype, stackArgs)
// framePool.Put(stackArgs)
// }
// } else {
// if stackArgs != nil {
// // Zero the now unused input area of args,
// // because the Values returned by this function contain pointers to the args object,
// // and will thus keep the args object alive indefinitely.
// typedmemclrpartial(frametype, stackArgs, 0, abid.retOffset)
// }
// // Wrap Values around return values in args.
// ret = make([]Value, nout)
// for i := 0; i < nout; i++ {
// tv := t.Out(i)
// if tv.Size() == 0 {
// // For zero-sized return value, args+off may point to the next object.
// // In this case, return the zero value instead.
// ret[i] = Zero(toRType(tv))
// continue
// }
// steps := abid.ret.stepsForValue(i)
// if st := steps[0]; st.kind == abiStepStack {
// // This value is on the stack. If part of a value is stack
// // allocated, the entire value is according to the ABI. So
// // just make an indirection into the allocated frame.
// fl := flagIndir | flag(tv.Kind())
// ret[i] = Value{tv, add(stackArgs, st.stkOff, "tv.Size() != 0"), fl}
// // Note: this does introduce false sharing between results -
// // if any result is live, they are all live.
// // (And the space for the args is live as well, but as we've
// // cleared that space it isn't as big a deal.)
// continue
// }
t := (*mapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
// // Handle pointers passed in registers.
// if !ifaceIndir(tv) {
// // Pointer-valued data gets put directly
// // into v.ptr.
// if steps[0].kind != abiStepPointer {
// print("kind=", steps[0].kind, ", type=", stringFor(tv), "\n")
// panic("mismatch between ABI description and types")
// }
// ret[i] = Value{tv, regArgs.Ptrs[steps[0].ireg], flag(tv.Kind())}
// continue
// }
iter.m.mustBeExported() // do not let unexported m leak
elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir}
elem = elem.assignTo("reflect.MapIter.SetValue", v.typ(), target)
typedmemmove(v.typ(), v.ptr, elem.ptr)
}
// // All that's left is values passed in registers that we need to
// // create space for and copy values back into.
// //
// // TODO(mknyszek): We make a new allocation for each register-allocated
// // value, but previously we could always point into the heap-allocated
// // stack frame. This is a regression that could be fixed by adding
// // additional space to the allocated stack frame and storing the
// // register-allocated return values into the allocated stack frame and
// // referring there in the resulting Value.
// s := unsafe_New(tv)
// for _, st := range steps {
// switch st.kind {
// case abiStepIntReg:
// offset := add(s, st.offset, "precomputed value offset")
// intFromReg(&regArgs, st.ireg, st.size, offset)
// case abiStepPointer:
// s := add(s, st.offset, "precomputed value offset")
// *((*unsafe.Pointer)(s)) = regArgs.Ptrs[st.ireg]
// case abiStepFloatReg:
// offset := add(s, st.offset, "precomputed value offset")
// floatFromReg(&regArgs, st.freg, st.size, offset)
// case abiStepStack:
// panic("register-based return value has stack component")
// default:
// panic("unknown ABI part kind")
// }
// }
// ret[i] = Value{tv, s, flagIndir | flag(tv.Kind())}
// }
// }
// return ret
// }
// Next advances the map iterator and reports whether there is another
// entry. It returns false when iter is exhausted; subsequent
// calls to Key, Value, or Next will panic.
func (iter *MapIter) Next() bool {
if !iter.m.IsValid() {
panic("MapIter.Next called on an iterator that does not have an associated map Value")
}
if !iter.hiter.initialized() {
mapiterinit(iter.m.typ(), iter.m.pointer(), &iter.hiter)
} else {
if mapiterkey(&iter.hiter) == nil {
panic("MapIter.Next called on exhausted iterator")
}
mapiternext(&iter.hiter)
}
return mapiterkey(&iter.hiter) != nil
}
// Reset modifies iter to iterate over v.
// It panics if v's Kind is not Map and v is not the zero Value.
// Reset(Value{}) causes iter not to refer to any map,
// which may allow the previously iterated-over map to be garbage collected.
func (iter *MapIter) Reset(v Value) {
if v.IsValid() {
v.mustBe(Map)
}
iter.m = v
iter.hiter = hiter{}
}
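Aside, not part of the patch: Reset lets one MapIter (and its embedded hiter) be reused across maps and then dropped so the previously iterated map can be collected:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	iter := reflect.ValueOf(map[string]int{"a": 1}).MapRange()
	for iter.Next() {
		fmt.Println(iter.Key(), iter.Value())
	}

	// Reuse the same iterator for another map.
	iter.Reset(reflect.ValueOf(map[string]int{"b": 2}))
	for iter.Next() {
		fmt.Println(iter.Key(), iter.Value())
	}

	// Drop the reference so the last map can be garbage collected.
	iter.Reset(reflect.Value{})
}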
// MapRange returns a range iterator for a map.
// It panics if v's Kind is not Map.
//
// Call Next to advance the iterator, and Key/Value to access each entry.
// Next returns false when the iterator is exhausted.
// MapRange follows the same iteration semantics as a range statement.
//
// Example:
//
// iter := reflect.ValueOf(m).MapRange()
// for iter.Next() {
// k := iter.Key()
// v := iter.Value()
// ...
// }
func (v Value) MapRange() *MapIter {
// This is inlinable to take advantage of "function outlining".
// The allocation of MapIter can be stack allocated if the caller
// does not allow it to escape.
// See https://blog.filippo.io/efficient-go-apis-with-the-inliner/
if v.kind() != Map {
v.panicNotMap()
}
return &MapIter{m: v}
}
// Force slow panicking path not inlined, so it won't add to the
// inlining budget of the caller.
// TODO: undo when the inliner is no longer bottom-up only.
//
//go:noinline
func (f flag) panicNotMap() {
f.mustBe(Map)
}
// copyVal returns a Value containing the map key or value at ptr,
// allocating a new variable as needed.
func copyVal(typ *abi.Type, fl flag, ptr unsafe.Pointer) Value {
if typ.IfaceIndir() {
// Copy result so future changes to the map
// won't change the underlying value.
c := unsafe_New(typ)
typedmemmove(typ, c, ptr)
return Value{typ, c, fl | flagIndir}
}
return Value{typ, *(*unsafe.Pointer)(ptr), fl}
}
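Aside, not part of the patch: the copy made by copyVal is what keeps a looked-up Value stable when the map changes afterwards:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	type pair struct{ x, y int }
	m := map[string]pair{"a": {1, 2}}

	e := reflect.ValueOf(m).MapIndex(reflect.ValueOf("a"))
	m["a"] = pair{9, 9} // mutate the map after the lookup

	// The Value holds a copy made at lookup time, so it still sees {1 2}.
	fmt.Println(e.Interface(), m["a"]) // {1 2} {9 9}
}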
// methodReceiver returns information about the receiver
// described by v. The Value v may or may not have the
@@ -2612,6 +2590,51 @@ func chanlen(ch unsafe.Pointer) int
//go:linkname maplen github.com/goplus/llgo/internal/runtime.MapLen
func maplen(ch unsafe.Pointer) int
//go:linkname mapaccess github.com/goplus/llgo/internal/runtime.MapAccess1
func mapaccess(t *abi.Type, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
//go:linkname mapassign0 github.com/goplus/llgo/internal/runtime.MapAssign
func mapassign0(t *abi.Type, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
func mapassign(t *abi.Type, m unsafe.Pointer, key, val unsafe.Pointer) {
contentEscapes(key)
contentEscapes(val)
p := mapassign0(t, m, key)
runtime.Typedmemmove(t.Elem(), p, val)
}
// //go:noescape
// func mapassign_faststr0(t *abi.Type, m unsafe.Pointer, key string, val unsafe.Pointer)
// func mapassign_faststr(t *abi.Type, m unsafe.Pointer, key string, val unsafe.Pointer) {
// contentEscapes((*unsafeheader.String)(unsafe.Pointer(&key)).Data)
// contentEscapes(val)
// mapassign_faststr0(t, m, key, val)
// }
//go:linkname mapdelete github.com/goplus/llgo/internal/runtime.MapDelete
func mapdelete(t *abi.Type, m unsafe.Pointer, key unsafe.Pointer)
//go:noescape
// func mapdelete_faststr(t *abi.Type, m unsafe.Pointer, key string)
//go:linkname mapiterinit github.com/goplus/llgo/internal/runtime.mapiterinit
func mapiterinit(t *abi.Type, m unsafe.Pointer, it *hiter)
func mapiterkey(it *hiter) (key unsafe.Pointer) {
return it.key
}
func mapiterelem(it *hiter) (elem unsafe.Pointer) {
return it.elem
}
//go:linkname mapiternext github.com/goplus/llgo/internal/runtime.mapiternext
func mapiternext(it *hiter)
//go:linkname mapclear github.com/goplus/llgo/internal/runtime.mapclear
func mapclear(t *abi.Type, m unsafe.Pointer)
// MakeSlice creates a new zero-initialized slice value
// for the specified slice type, length, and capacity.
func MakeSlice(typ Type, len, cap int) Value {