diff --git a/c/c.go b/c/c.go
index 4c5f4e13..e8faab82 100644
--- a/c/c.go
+++ b/c/c.go
@@ -66,6 +66,9 @@ func Free(ptr Pointer)
 
 //go:linkname Memcpy C.memcpy
 func Memcpy(dst, src Pointer, n uintptr) Pointer
 
+//go:linkname Memmove C.memmove
+func Memmove(dst, src Pointer, n uintptr) Pointer
+
 //go:linkname Memset C.memset
 func Memset(s Pointer, c Int, n uintptr) Pointer
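The new memmove binding complements the existing memcpy one: memmove is specified to handle overlapping source and destination ranges, while memcpy is not. A minimal sketch of when a caller would reach for the new function, assuming the c package's usual Malloc binding and that c.Pointer is an alias of unsafe.Pointer (both are assumptions about code not shown in this diff):

package main

import (
	"unsafe"

	"github.com/goplus/llgo/c"
)

func main() {
	buf := c.Malloc(16) // assumed: c.Malloc(n uintptr) c.Pointer, in the same //go:linkname style
	defer c.Free(buf)
	c.Memset(buf, c.Int('A'), 16)

	// Shift the first 8 bytes right by one. Source and destination overlap,
	// so memmove is required; memcpy would be undefined behavior here.
	c.Memmove(c.Pointer(unsafe.Add(buf, 1)), buf, 8)
}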
diff --git a/internal/abi/type.go b/internal/abi/type.go
index c3f0f9ed..3e7250b9 100644
--- a/internal/abi/type.go
+++ b/internal/abi/type.go
@@ -170,6 +170,24 @@ type MapType struct {
 	Flags uint32
 }
 
+// Note: flag values must match those used in the TMAP case
+// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
+func (mt *MapType) IndirectKey() bool { // store ptr to key instead of key itself
+	return mt.Flags&1 != 0
+}
+func (mt *MapType) IndirectElem() bool { // store ptr to elem instead of elem itself
+	return mt.Flags&2 != 0
+}
+func (mt *MapType) ReflexiveKey() bool { // true if k==k for all keys
+	return mt.Flags&4 != 0
+}
+func (mt *MapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
+	return mt.Flags&8 != 0
+}
+func (mt *MapType) HashMightPanic() bool { // true if hash function might panic
+	return mt.Flags&16 != 0
+}
+
 type PtrType struct {
 	Type
 	Elem *Type // pointer element (pointed at) type
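The new accessors decode a bitmask written by the compiler, one bit per map-implementation detail. The sketch below reproduces the layout outside the runtime to show how the bits compose; the flag names are illustrative, but the values 1, 2, 4, 8 and 16 are exactly the ones the methods above test, and (as the diff's comment notes) they must stay in sync with the compiler's writeType:

package main

import "fmt"

const (
	flagIndirectKey    = 1 << iota // 1: bucket stores *key rather than key
	flagIndirectElem               // 2: bucket stores *elem rather than elem
	flagReflexiveKey               // 4: k == k for all keys
	flagNeedKeyUpdate              // 8: overwrite must update the stored key too
	flagHashMightPanic             // 16: hash function can panic
)

func main() {
	flags := uint32(flagIndirectKey | flagNeedKeyUpdate)
	fmt.Println(flags&flagIndirectKey != 0)   // true
	fmt.Println(flags&flagReflexiveKey != 0)  // false
	fmt.Println(flags&flagNeedKeyUpdate != 0) // true
}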
diff --git a/internal/runtime/error.go b/internal/runtime/error.go
new file mode 100644
index 00000000..a4205aa9
--- /dev/null
+++ b/internal/runtime/error.go
@@ -0,0 +1,334 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+/*
+import "internal/bytealg"
+
+// The Error interface identifies a run time error.
+type Error interface {
+	error
+
+	// RuntimeError is a no-op function but
+	// serves to distinguish types that are run time
+	// errors from ordinary errors: a type is a
+	// run time error if it has a RuntimeError method.
+	RuntimeError()
+}
+
+// A TypeAssertionError explains a failed type assertion.
+type TypeAssertionError struct {
+	_interface    *_type
+	concrete      *_type
+	asserted      *_type
+	missingMethod string // one method needed by Interface, missing from Concrete
+}
+
+func (*TypeAssertionError) RuntimeError() {}
+
+func (e *TypeAssertionError) Error() string {
+	inter := "interface"
+	if e._interface != nil {
+		inter = toRType(e._interface).string()
+	}
+	as := toRType(e.asserted).string()
+	if e.concrete == nil {
+		return "interface conversion: " + inter + " is nil, not " + as
+	}
+	cs := toRType(e.concrete).string()
+	if e.missingMethod == "" {
+		msg := "interface conversion: " + inter + " is " + cs + ", not " + as
+		if cs == as {
+			// provide slightly clearer error message
+			if toRType(e.concrete).pkgpath() != toRType(e.asserted).pkgpath() {
+				msg += " (types from different packages)"
+			} else {
+				msg += " (types from different scopes)"
+			}
+		}
+		return msg
+	}
+	return "interface conversion: " + cs + " is not " + as +
+		": missing method " + e.missingMethod
+}
+
+// itoa converts val to a decimal representation. The result is
+// written somewhere within buf and the location of the result is returned.
+// buf must be at least 20 bytes.
+//
+//go:nosplit
+func itoa(buf []byte, val uint64) []byte {
+	i := len(buf) - 1
+	for val >= 10 {
+		buf[i] = byte(val%10 + '0')
+		i--
+		val /= 10
+	}
+	buf[i] = byte(val + '0')
+	return buf[i:]
+}
+
+// An errorString represents a runtime error described by a single string.
+type errorString string
+
+func (e errorString) RuntimeError() {}
+
+func (e errorString) Error() string {
+	return "runtime error: " + string(e)
+}
+
+type errorAddressString struct {
+	msg  string  // error message
+	addr uintptr // memory address where the error occurred
+}
+
+func (e errorAddressString) RuntimeError() {}
+
+func (e errorAddressString) Error() string {
+	return "runtime error: " + e.msg
+}
+
+// Addr returns the memory address where a fault occurred.
+// The address provided is best-effort.
+// The veracity of the result may depend on the platform.
+// Errors providing this method will only be returned as
+// a result of using runtime/debug.SetPanicOnFault.
+func (e errorAddressString) Addr() uintptr {
+	return e.addr
+}
+*/
+
+// plainError represents a runtime error described by a string without
+// the prefix "runtime error: " after invoking errorString.Error().
+// See Issue #14965.
+type plainError string
+
+func (e plainError) RuntimeError() {}
+
+func (e plainError) Error() string {
+	return string(e)
+}
+
+/*
+// A boundsError represents an indexing or slicing operation gone wrong.
+type boundsError struct {
+	x int64
+	y int
+	// Values in an index or slice expression can be signed or unsigned.
+	// That means we'd need 65 bits to encode all possible indexes, from -2^63 to 2^64-1.
+	// Instead, we keep track of whether x should be interpreted as signed or unsigned.
+	// y is known to be nonnegative and to fit in an int.
+	signed bool
+	code   boundsErrorCode
+}
+
+type boundsErrorCode uint8
+
+const (
+	boundsIndex boundsErrorCode = iota // s[x], 0 <= x < len(s) failed
+
+	boundsSliceAlen // s[?:x], 0 <= x <= len(s) failed
+	boundsSliceAcap // s[?:x], 0 <= x <= cap(s) failed
+	boundsSliceB    // s[x:y], 0 <= x <= y failed (but boundsSliceA didn't happen)
+
+	boundsSlice3Alen // s[?:?:x], 0 <= x <= len(s) failed
+	boundsSlice3Acap // s[?:?:x], 0 <= x <= cap(s) failed
+	boundsSlice3B    // s[?:x:y], 0 <= x <= y failed (but boundsSlice3A didn't happen)
+	boundsSlice3C    // s[x:y:?], 0 <= x <= y failed (but boundsSlice3A/B didn't happen)
+
+	boundsConvert // (*[x]T)(s), 0 <= x <= len(s) failed
+	// Note: in the above, len(s) and cap(s) are stored in y
+)
+
+// boundsErrorFmts provide error text for various out-of-bounds panics.
+// Note: if you change these strings, you should adjust the size of the buffer
+// in boundsError.Error below as well.
+var boundsErrorFmts = [...]string{
+	boundsIndex:      "index out of range [%x] with length %y",
+	boundsSliceAlen:  "slice bounds out of range [:%x] with length %y",
+	boundsSliceAcap:  "slice bounds out of range [:%x] with capacity %y",
+	boundsSliceB:     "slice bounds out of range [%x:%y]",
+	boundsSlice3Alen: "slice bounds out of range [::%x] with length %y",
+	boundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y",
+	boundsSlice3B:    "slice bounds out of range [:%x:%y]",
+	boundsSlice3C:    "slice bounds out of range [%x:%y:]",
+	boundsConvert:    "cannot convert slice with length %y to array or pointer to array with length %x",
+}
+
+// boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y.
+var boundsNegErrorFmts = [...]string{
+	boundsIndex:      "index out of range [%x]",
+	boundsSliceAlen:  "slice bounds out of range [:%x]",
+	boundsSliceAcap:  "slice bounds out of range [:%x]",
+	boundsSliceB:     "slice bounds out of range [%x:]",
+	boundsSlice3Alen: "slice bounds out of range [::%x]",
+	boundsSlice3Acap: "slice bounds out of range [::%x]",
+	boundsSlice3B:    "slice bounds out of range [:%x:]",
+	boundsSlice3C:    "slice bounds out of range [%x::]",
+}
+
+func (e boundsError) RuntimeError() {}
+
+func appendIntStr(b []byte, v int64, signed bool) []byte {
+	if signed && v < 0 {
+		b = append(b, '-')
+		v = -v
+	}
+	var buf [20]byte
+	b = append(b, itoa(buf[:], uint64(v))...)
+	return b
+}
+
+func (e boundsError) Error() string {
+	fmt := boundsErrorFmts[e.code]
+	if e.signed && e.x < 0 {
+		fmt = boundsNegErrorFmts[e.code]
+	}
+	// max message length is 99: "runtime error: slice bounds out of range [::%x] with capacity %y"
+	// x can be at most 20 characters. y can be at most 19.
+	b := make([]byte, 0, 100)
+	b = append(b, "runtime error: "...)
+	for i := 0; i < len(fmt); i++ {
+		c := fmt[i]
+		if c != '%' {
+			b = append(b, c)
+			continue
+		}
+		i++
+		switch fmt[i] {
+		case 'x':
+			b = appendIntStr(b, e.x, e.signed)
+		case 'y':
+			b = appendIntStr(b, int64(e.y), true)
+		}
+	}
+	return string(b)
+}
+
+type stringer interface {
+	String() string
+}
+
+// printany prints an argument passed to panic.
+// If panic is called with a value that has a String or Error method,
+// it has already been converted into a string by preprintpanics.
+func printany(i any) {
+	switch v := i.(type) {
+	case nil:
+		print("nil")
+	case bool:
+		print(v)
+	case int:
+		print(v)
+	case int8:
+		print(v)
+	case int16:
+		print(v)
+	case int32:
+		print(v)
+	case int64:
+		print(v)
+	case uint:
+		print(v)
+	case uint8:
+		print(v)
+	case uint16:
+		print(v)
+	case uint32:
+		print(v)
+	case uint64:
+		print(v)
+	case uintptr:
+		print(v)
+	case float32:
+		print(v)
+	case float64:
+		print(v)
+	case complex64:
+		print(v)
+	case complex128:
+		print(v)
+	case string:
+		print(v)
+	default:
+		printanycustomtype(i)
+	}
+}
+
+func printanycustomtype(i any) {
+	eface := efaceOf(&i)
+	typestring := toRType(eface._type).string()
+
+	switch eface._type.Kind_ {
+	case kindString:
+		print(typestring, `("`, *(*string)(eface.data), `")`)
+	case kindBool:
+		print(typestring, "(", *(*bool)(eface.data), ")")
+	case kindInt:
+		print(typestring, "(", *(*int)(eface.data), ")")
+	case kindInt8:
+		print(typestring, "(", *(*int8)(eface.data), ")")
+	case kindInt16:
+		print(typestring, "(", *(*int16)(eface.data), ")")
+	case kindInt32:
+		print(typestring, "(", *(*int32)(eface.data), ")")
+	case kindInt64:
+		print(typestring, "(", *(*int64)(eface.data), ")")
+	case kindUint:
+		print(typestring, "(", *(*uint)(eface.data), ")")
+	case kindUint8:
+		print(typestring, "(", *(*uint8)(eface.data), ")")
+	case kindUint16:
+		print(typestring, "(", *(*uint16)(eface.data), ")")
+	case kindUint32:
+		print(typestring, "(", *(*uint32)(eface.data), ")")
+	case kindUint64:
+		print(typestring, "(", *(*uint64)(eface.data), ")")
+	case kindUintptr:
+		print(typestring, "(", *(*uintptr)(eface.data), ")")
+	case kindFloat32:
+		print(typestring, "(", *(*float32)(eface.data), ")")
+	case kindFloat64:
+		print(typestring, "(", *(*float64)(eface.data), ")")
+	case kindComplex64:
+		print(typestring, *(*complex64)(eface.data))
+	case kindComplex128:
+		print(typestring, *(*complex128)(eface.data))
+	default:
+		print("(", typestring, ") ", eface.data)
+	}
+}
+
+// panicwrap generates a panic for a call to a wrapped value method
+// with a nil pointer receiver.
+//
+// It is called from the generated wrapper code.
+func panicwrap() {
+	pc := getcallerpc()
+	name := funcNameForPrint(funcname(findfunc(pc)))
+	// name is something like "main.(*T).F".
+	// We want to extract pkg ("main"), typ ("T"), and meth ("F").
+	// Do it by finding the parens.
+	i := bytealg.IndexByteString(name, '(')
+	if i < 0 {
+		throw("panicwrap: no ( in " + name)
+	}
+	pkg := name[:i-1]
+	if i+2 >= len(name) || name[i-1:i+2] != ".(*" {
+		throw("panicwrap: unexpected string after package name: " + name)
+	}
+	name = name[i+2:]
+	i = bytealg.IndexByteString(name, ')')
+	if i < 0 {
+		throw("panicwrap: no ) in " + name)
+	}
+	if i+2 >= len(name) || name[i:i+2] != ")." {
+		throw("panicwrap: unexpected string after type name: " + name)
+	}
+	typ := name[:i]
+	meth := name[i+2:]
+	panic(plainError("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer"))
+}
+*/
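A pattern worth noting in error.go is that it formats panic messages without fmt: itoa writes digits backward into a fixed 20-byte buffer (enough for any uint64), appendIntStr adds the sign, and boundsError.Error assembles the message into a single preallocated slice. The same idiom, extracted into a standalone program:

package main

import "fmt"

func itoa(buf []byte, val uint64) []byte {
	i := len(buf) - 1
	for val >= 10 {
		buf[i] = byte(val%10 + '0')
		i--
		val /= 10
	}
	buf[i] = byte(val + '0')
	return buf[i:] // digits occupy the tail of buf
}

func appendIntStr(b []byte, v int64, signed bool) []byte {
	if signed && v < 0 {
		b = append(b, '-')
		v = -v
	}
	var buf [20]byte
	return append(b, itoa(buf[:], uint64(v))...)
}

func main() {
	b := make([]byte, 0, 100) // one allocation, as in boundsError.Error
	b = append(b, "runtime error: index out of range ["...)
	b = appendIntStr(b, -3, true)
	b = append(b, ']')
	fmt.Println(string(b)) // runtime error: index out of range [-3]
}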
diff --git a/internal/runtime/malloc.go b/internal/runtime/malloc.go
new file mode 100644
index 00000000..c23f1bd0
--- /dev/null
+++ b/internal/runtime/malloc.go
@@ -0,0 +1,339 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+	"unsafe"
+)
+
+// implementation of new builtin
+// compiler (both frontend and SSA backend) knows the signature
+// of this function.
+func newobject(typ *_type) unsafe.Pointer {
+	return AllocZ(typ.Size_)
+}
+
+/*
+//go:linkname reflect_unsafe_New reflect.unsafe_New
+func reflect_unsafe_New(typ *_type) unsafe.Pointer {
+	return mallocgc(typ.Size_, typ, true)
+}
+
+//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
+func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
+	return mallocgc(typ.Size_, typ, true)
+}
+*/
+
+const mathMaxUintptr = ^uintptr(0)
+
+// mathMulUintptr returns a * b and whether the multiplication overflowed.
+// On supported platforms this is an intrinsic lowered by the compiler.
+func mathMulUintptr(a, b uintptr) (uintptr, bool) {
+	if a|b < 1<<(4*goarchPtrSize) || a == 0 {
+		return a * b, false
+	}
+	overflow := b > mathMaxUintptr/a
+	return a * b, overflow
+}
+
+// newarray allocates an array of n elements of type typ.
+func newarray(typ *_type, n int) unsafe.Pointer {
+	if n == 1 {
+		return AllocZ(typ.Size_)
+	}
+	mem, overflow := mathMulUintptr(typ.Size_, uintptr(n))
+	if overflow || n < 0 {
+		panic(plainError("runtime: allocation size out of range"))
+	}
+	return AllocZ(mem)
+}
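newarray guards its size computation with mathMulUintptr, which avoids the relatively expensive division in the common case: if both operands fit in half the pointer width (or a is 0), the product cannot overflow. A runnable sketch of that check (the constants in main assume a 64-bit target):

package main

import (
	"fmt"
	"unsafe"
)

const ptrSize = unsafe.Sizeof(uintptr(0)) // 8 on 64-bit targets

func mulUintptr(a, b uintptr) (uintptr, bool) {
	// Both operands below 2^32 on 64-bit: a*b fits in 64 bits, skip the division.
	if a|b < 1<<(4*ptrSize) || a == 0 {
		return a * b, false
	}
	return a * b, b > ^uintptr(0)/a
}

func main() {
	fmt.Println(mulUintptr(1<<20, 1<<20)) // 1099511627776 false
	fmt.Println(mulUintptr(1<<40, 1<<40)) // 0 true (product wrapped around)
}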
+
+/*
+//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
+func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
+	return newarray(typ, n)
+}
+
+func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
+	c := getMCache(mp)
+	if c == nil {
+		throw("profilealloc called without a P or outside bootstrapping")
+	}
+	c.nextSample = nextSample()
+	mProf_Malloc(x, size)
+}
+
+// nextSample returns the next sampling point for heap profiling. The goal is
+// to sample allocations on average every MemProfileRate bytes, but with a
+// completely random distribution over the allocation timeline; this
+// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
+// processes, the distance between two samples follows the exponential
+// distribution (exp(MemProfileRate)), so the best return value is a random
+// number taken from an exponential distribution whose mean is MemProfileRate.
+func nextSample() uintptr {
+	if MemProfileRate == 1 {
+		// Callers assign our return value to
+		// mcache.next_sample, but next_sample is not used
+		// when the rate is 1. So avoid the math below and
+		// just return something.
+		return 0
+	}
+	if GOOS == "plan9" {
+		// Plan 9 doesn't support floating point in note handler.
+		if gp := getg(); gp == gp.m.gsignal {
+			return nextSampleNoFP()
+		}
+	}
+
+	return uintptr(fastexprand(MemProfileRate))
+}
+
+// fastexprand returns a random number from an exponential distribution with
+// the specified mean.
+func fastexprand(mean int) int32 {
+	// Avoid overflow. Maximum possible step is
+	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
+	switch {
+	case mean > 0x7000000:
+		mean = 0x7000000
+	case mean == 0:
+		return 0
+	}
+
+	// Take a random sample of the exponential distribution exp(-mean*x).
+	// The probability distribution function is mean*exp(-mean*x), so the CDF is
+	// p = 1 - exp(-mean*x), so
+	// q = 1 - p == exp(-mean*x)
+	// log_e(q) = -mean*x
+	// -log_e(q)/mean = x
+	// x = -log_e(q) * mean
+	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
+	const randomBitCount = 26
+	q := fastrandn(1<<randomBitCount) + 1
+	qlog := fastlog2(float64(q)) - randomBitCount
+	if qlog > 0 {
+		qlog = 0
+	}
+	const minusLog2 = -0.6931471805599453 // -ln(2)
+	return int32(qlog*(minusLog2*float64(mean))) + 1
+}
+
+// nextSampleNoFP is similar to nextSample, but uses older,
+// simpler code to avoid floating point.
+func nextSampleNoFP() uintptr {
+	// Set first allocation sample size.
+	rate := MemProfileRate
+	if rate > 0x3fffffff { // make 2*rate not overflow
+		rate = 0x3fffffff
+	}
+	if rate != 0 {
+		return uintptr(fastrandn(uint32(2 * rate)))
+	}
+	return 0
+}
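nextSample and fastexprand draw the distance to the next heap-profile sample from an exponential distribution, which makes sampling a Poisson process over allocated bytes. The same inverse-CDF math, sketched with math/rand standing in for the runtime-internal fastrandn and fastlog2:

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// next returns a sample distance drawn from an exponential distribution
// with the given mean: for q uniform in (0, 1], -ln(q) * mean is
// exponentially distributed with that mean.
func next(mean float64) int64 {
	q := 1 - rand.Float64() // rand.Float64 is in [0, 1), so q is in (0, 1]
	return int64(-math.Log(q)*mean) + 1
}

func main() {
	const rate = 512 * 1024 // the default MemProfileRate
	var sum int64
	const n = 200000
	for i := 0; i < n; i++ {
		sum += next(rate)
	}
	fmt.Println("average sample distance:", sum/n) // close to 524288
}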
+
+type persistentAlloc struct {
+	base *notInHeap
+	off  uintptr
+}
+
+var globalAlloc struct {
+	mutex
+	persistentAlloc
+}
+
+// persistentChunkSize is the number of bytes we allocate when we grow
+// a persistentAlloc.
+const persistentChunkSize = 256 << 10
+
+// persistentChunks is a list of all the persistent chunks we have
+// allocated. The list is maintained through the first word in the
+// persistent chunk. This is updated atomically.
+var persistentChunks *notInHeap
+
+// Wrapper around sysAlloc that can allocate small chunks.
+// There is no associated free operation.
+// Intended for things like function/type/debug-related persistent data.
+// If align is 0, uses default align (currently 8).
+// The returned memory will be zeroed.
+// sysStat must be non-nil.
+//
+// Consider marking persistentalloc'd types not in heap by embedding
+// runtime/internal/sys.NotInHeap.
+func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
+	var p *notInHeap
+	systemstack(func() {
+		p = persistentalloc1(size, align, sysStat)
+	})
+	return unsafe.Pointer(p)
+}
+
+// Must run on system stack because stack growth can (re)invoke it.
+// See issue 9174.
+//
+//go:systemstack
+func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
+	const (
+		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
+	)
+
+	if size == 0 {
+		throw("persistentalloc: size == 0")
+	}
+	if align != 0 {
+		if align&(align-1) != 0 {
+			throw("persistentalloc: align is not a power of 2")
+		}
+		if align > _PageSize {
+			throw("persistentalloc: align is too large")
+		}
+	} else {
+		align = 8
+	}
+
+	if size >= maxBlock {
+		return (*notInHeap)(sysAlloc(size, sysStat))
+	}
+
+	mp := acquirem()
+	var persistent *persistentAlloc
+	if mp != nil && mp.p != 0 {
+		persistent = &mp.p.ptr().palloc
+	} else {
+		lock(&globalAlloc.mutex)
+		persistent = &globalAlloc.persistentAlloc
+	}
+	persistent.off = alignUp(persistent.off, align)
+	if persistent.off+size > persistentChunkSize || persistent.base == nil {
+		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
+		if persistent.base == nil {
+			if persistent == &globalAlloc.persistentAlloc {
+				unlock(&globalAlloc.mutex)
+			}
+			throw("runtime: cannot allocate memory")
+		}
+
+		// Add the new chunk to the persistentChunks list.
+		for {
+			chunks := uintptr(unsafe.Pointer(persistentChunks))
+			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
+			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
+				break
+			}
+		}
+		persistent.off = alignUp(goarch.PtrSize, align)
+	}
+	p := persistent.base.add(persistent.off)
+	persistent.off += size
+	releasem(mp)
+	if persistent == &globalAlloc.persistentAlloc {
+		unlock(&globalAlloc.mutex)
+	}
+
+	if sysStat != &memstats.other_sys {
+		sysStat.add(int64(size))
+		memstats.other_sys.add(-int64(size))
+	}
+	return p
+}
+
+// inPersistentAlloc reports whether p points to memory allocated by
+// persistentalloc. This must be nosplit because it is called by the
+// cgo checker code, which is called by the write barrier code.
+//
+//go:nosplit
+func inPersistentAlloc(p uintptr) bool {
+	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
+	for chunk != 0 {
+		if p >= chunk && p < chunk+persistentChunkSize {
+			return true
+		}
+		chunk = *(*uintptr)(unsafe.Pointer(chunk))
+	}
+	return false
+}
+
+// linearAlloc is a simple linear allocator that pre-reserves a region
+// of memory and then optionally maps that region into the Ready state
+// as needed.
+//
+// The caller is responsible for locking.
+type linearAlloc struct {
+	next   uintptr // next free byte
+	mapped uintptr // one byte past end of mapped space
+	end    uintptr // end of reserved space
+
+	mapMemory bool // transition memory from Reserved to Ready if true
+}
+
+func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
+	if base+size < base {
+		// Chop off the last byte. The runtime isn't prepared
+		// to deal with situations where the bounds could overflow.
+		// Leave that memory reserved, though, so we don't map it
+		// later.
+		size -= 1
+	}
+	l.next, l.mapped = base, base
+	l.end = base + size
+	l.mapMemory = mapMemory
+}
+
+func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
+	p := alignUp(l.next, align)
+	if p+size > l.end {
+		return nil
+	}
+	l.next = p + size
+	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
+		if l.mapMemory {
+			// Transition from Reserved to Prepared to Ready.
+			n := pEnd - l.mapped
+			sysMap(unsafe.Pointer(l.mapped), n, sysStat)
+			sysUsed(unsafe.Pointer(l.mapped), n, n)
+		}
+		l.mapped = pEnd
+	}
+	return unsafe.Pointer(p)
+}
+
+// notInHeap is off-heap memory allocated by a lower-level allocator
+// like sysAlloc or persistentAlloc.
+//
+// In general, it's better to use real types which embed
+// runtime/internal/sys.NotInHeap, but this serves as a generic type
+// for situations where that isn't possible (like in the allocators).
+//
+// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
+type notInHeap struct{ _ sys.NotInHeap }
+
+func (p *notInHeap) add(bytes uintptr) *notInHeap {
+	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
+}
+
+// computeRZlog computes the size of the redzone.
+// Refer to the implementation of the compiler-rt.
+func computeRZlog(userSize uintptr) uintptr {
+	switch {
+	case userSize <= (64 - 16):
+		return 16 << 0
+	case userSize <= (128 - 32):
+		return 16 << 1
+	case userSize <= (512 - 64):
+		return 16 << 2
+	case userSize <= (4096 - 128):
+		return 16 << 3
+	case userSize <= (1<<14)-256:
+		return 16 << 4
+	case userSize <= (1<<15)-512:
+		return 16 << 5
+	case userSize <= (1<<16)-1024:
+		return 16 << 6
+	default:
+		return 16 << 7
+	}
+}
+*/
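linearAlloc (like persistentalloc's per-chunk bookkeeping above it) is a bump allocator: allocation is an aligned pointer increment, and nothing is ever freed. Everything except the lazy sysMap/sysUsed mapping can be shown with an ordinary byte slice standing in for the reserved address range:

package main

import "fmt"

type bumpAlloc struct {
	buf  []byte  // stands in for the pre-reserved region
	next uintptr // next free byte, as in linearAlloc
}

func alignUp(n, align uintptr) uintptr {
	return (n + align - 1) &^ (align - 1)
}

func (l *bumpAlloc) alloc(size, align uintptr) []byte {
	p := alignUp(l.next, align)
	if p+size > uintptr(len(l.buf)) {
		return nil // out of reserved space
	}
	l.next = p + size
	return l.buf[p : p+size : p+size]
}

func main() {
	l := &bumpAlloc{buf: make([]byte, 1<<10)}
	a := l.alloc(10, 8)
	b := l.alloc(16, 16) // starts at 16: 10 rounded up to the alignment
	fmt.Println(len(a), len(b), l.next) // 10 16 32
}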
diff --git a/internal/runtime/map.go b/internal/runtime/map.go
index 4c83a751..ce69bb34 100644
--- a/internal/runtime/map.go
+++ b/internal/runtime/map.go
@@ -59,6 +59,12 @@ import (
 	"github.com/goplus/llgo/internal/abi"
 )
 
+type maptype = abi.MapType
+
+const (
+	goarchPtrSize = unsafe.Sizeof(uintptr(0))
+)
+
 const (
 	// Maximum number of key/elem pairs a bucket can hold.
 	bucketCntBits = abi.MapBucketCountBits
@@ -74,8 +80,9 @@ const (
 	// Must fit in a uint8.
 	// Fast versions cannot handle big elems - the cutoff size for
 	// fast versions in cmd/compile/internal/gc/walk.go must be at most this elem.
-	maxKeySize  = abi.MapMaxKeyBytes
-	maxElemSize = abi.MapMaxElemBytes
+	//
+	// maxKeySize  = abi.MapMaxKeyBytes
+	// maxElemSize = abi.MapMaxElemBytes
 
 	// data offset should be the size of the bmap struct, but needs to be
 	// aligned correctly. For amd64p32 this means 64-bit alignment
@@ -179,11 +186,12 @@ type hiter struct {
 	bucket      uintptr
 	checkBucket uintptr
 }
+*/
 
 // bucketShift returns 1<<b, optimized for code generation.
 func bucketShift(b uint8) uintptr {
 	// Masking the shift amount allows overflow checks to be elided.
-	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
+	return uintptr(1) << (b & (goarchPtrSize*8 - 1))
 }
 
 // bucketMask returns 1<<b - 1, optimized for code generation.
@@ -196,6 +204,6 @@
 // tophash calculates the tophash value for hash.
 func tophash(hash uintptr) uint8 {
-	top := uint8(hash >> (goarch.PtrSize*8 - 8))
+	top := uint8(hash >> (goarchPtrSize*8 - 8))
 	if top < minTopHash {
 		top += minTopHash
 	}
@@ -206,16 +214,18 @@ func evacuated(b *bmap) bool {
 }
 
 func (b *bmap) overflow(t *maptype) *bmap {
-	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
+	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarchPtrSize))
 }
 
 func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
-	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
+	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarchPtrSize)) = ovf
 }
 
+/*
 func (b *bmap) keys() unsafe.Pointer {
 	return add(unsafe.Pointer(b), dataOffset)
 }
+*/
 
 // incrnoverflow increments h.noverflow.
 // noverflow counts the number of overflow buckets.
@@ -280,6 +290,7 @@ func (h *hmap) createOverflow() {
 	}
 }
 
+/*
 func makemap64(t *maptype, hint int64, h *hmap) *hmap {
 	if int64(int(hint)) != hint {
 		hint = 0
@@ -337,6 +348,7 @@ func makemap(t *maptype, hint int, h *hmap) *hmap {
 
 	return h
 }
+*/
 
 // makeBucketArray initializes a backing array for map buckets.
 // 1<<b is the minimum number of buckets to allocate.
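For context on the code these hunks touch: the runtime stores the top byte of each key's hash in a per-bucket tophash array, and values below minTopHash (5 in the Go runtime) are reserved as cell-state sentinels, so real hashes are bumped past them. A small demonstration, using the same unsafe.Sizeof idiom the diff introduces for goarchPtrSize:

package main

import (
	"fmt"
	"unsafe"
)

const goarchPtrSize = unsafe.Sizeof(uintptr(0)) // pointer size in bytes, no goarch import needed

const minTopHash = 5 // values 0..4 mark empty and evacuated cells

func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarchPtrSize*8 - 8)) // keep only the top byte
	if top < minTopHash {
		top += minTopHash // reserve the low values for sentinels
	}
	return top
}

func main() {
	fmt.Println(tophash(^uintptr(0))) // all-ones hash: 255
	fmt.Println(tophash(1))           // top byte 0, bumped to 5
}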