build: separate compiler and libs
compiler/internal/runtime/alg.go: 315 lines (Normal file)
@@ -0,0 +1,315 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
    "unsafe"

    "github.com/goplus/llgo/compiler/internal/abi"
    "github.com/goplus/llgo/compiler/internal/runtime/goarch"
)

const (
    c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289)
    c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503)
)

func memhash0(p unsafe.Pointer, h uintptr) uintptr {
    return h
}

func memhash8(p unsafe.Pointer, h uintptr) uintptr {
    return memhash(p, h, 1)
}

func memhash16(p unsafe.Pointer, h uintptr) uintptr {
    return memhash(p, h, 2)
}

func memhash128(p unsafe.Pointer, h uintptr) uintptr {
    return memhash(p, h, 16)
}

//go:nosplit
// func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr {
//     ptr := getclosureptr()
//     size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h)))
//     return memhash(p, h, size)
// }

// in asm_*.s
// func memhash(p unsafe.Pointer, h, s uintptr) uintptr
// func memhash32(p unsafe.Pointer, h uintptr) uintptr
// func memhash64(p unsafe.Pointer, h uintptr) uintptr
// func strhash(p unsafe.Pointer, h uintptr) uintptr

func strhash(a unsafe.Pointer, h uintptr) uintptr {
    x := (*String)(a)
    return memhash(x.data, h, uintptr(x.len))
}

// NOTE: Because NaN != NaN, a map can contain any
// number of (mostly useless) entries keyed with NaNs.
// To avoid long hash chains, we assign a random number
// as the hash value for a NaN.

func f32hash(p unsafe.Pointer, h uintptr) uintptr {
    f := *(*float32)(p)
    switch {
    case f == 0:
        return c1 * (c0 ^ h) // +0, -0
    case f != f:
        return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
    default:
        return memhash(p, h, 4)
    }
}

func f64hash(p unsafe.Pointer, h uintptr) uintptr {
    f := *(*float64)(p)
    switch {
    case f == 0:
        return c1 * (c0 ^ h) // +0, -0
    case f != f:
        return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
    default:
        return memhash(p, h, 8)
    }
}

func c64hash(p unsafe.Pointer, h uintptr) uintptr {
    x := (*[2]float32)(p)
    return f32hash(unsafe.Pointer(&x[1]), f32hash(unsafe.Pointer(&x[0]), h))
}

func c128hash(p unsafe.Pointer, h uintptr) uintptr {
    x := (*[2]float64)(p)
    return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h))
}

func interhash(p unsafe.Pointer, h uintptr) uintptr {
    a := (*iface)(p)
    tab := a.tab
    if tab == nil {
        return h
    }
    t := tab._type
    if t.Equal == nil {
        // Check hashability here. We could do this check inside
        // typehash, but we want to report the topmost type in
        // the error text (e.g. in a struct with a field of slice type
        // we want to report the struct, not the slice).
        panic(errorString("hash of unhashable type " + t.String()))
    }
    if isDirectIface(t) {
        return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
    } else {
        return c1 * typehash(t, a.data, h^c0)
    }
}

func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
    a := (*eface)(p)
    t := a._type
    if t == nil {
        return h
    }
    if t.Equal == nil {
        // See comment in interhash above.
        panic(errorString("hash of unhashable type " + t.String()))
    }
    if isDirectIface(t) {
        return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
    } else {
        return c1 * typehash(t, a.data, h^c0)
    }
}

// typehash computes the hash of the object of type t at address p.
// h is the seed.
// This function is seldom used. Most maps use for hashing either
// fixed functions (e.g. f32hash) or compiler-generated functions
// (e.g. for a type like struct { x, y string }). This implementation
// is slower but more general and is used for hashing interface types
// (called from interhash or nilinterhash, above) or for hashing in
// maps generated by reflect.MapOf (reflect_typehash, below).
// Note: this function must match the compiler generated
// functions exactly. See issue 37716.
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
    if t.TFlag&abi.TFlagRegularMemory != 0 {
        // Handle ptr sizes specially, see issue 37086.
        switch t.Size_ {
        case 4:
            return memhash32(p, h)
        case 8:
            return memhash64(p, h)
        default:
            return memhash(p, h, t.Size_)
        }
    }
    switch t.Kind() {
    case abi.Float32:
        return f32hash(p, h)
    case abi.Float64:
        return f64hash(p, h)
    case abi.Complex64:
        return c64hash(p, h)
    case abi.Complex128:
        return c128hash(p, h)
    case abi.String:
        return strhash(p, h)
    case abi.Interface:
        i := (*interfacetype)(unsafe.Pointer(t))
        if len(i.Methods) == 0 {
            return nilinterhash(p, h)
        }
        return interhash(p, h)
    case abi.Array:
        a := (*arraytype)(unsafe.Pointer(t))
        for i := uintptr(0); i < a.Len; i++ {
            h = typehash(a.Elem, add(p, i*a.Elem.Size_), h)
        }
        return h
    case abi.Struct:
        s := (*structtype)(unsafe.Pointer(t))
        for _, f := range s.Fields {
            if f.Name_ == "_" {
                continue
            }
            h = typehash(f.Typ, add(p, f.Offset), h)
        }
        return h
    default:
        // Should never happen, as typehash should only be called
        // with comparable types.
        panic(errorString("hash of unhashable type " + t.String()))
    }
}

func memequalptr(p, q unsafe.Pointer) bool {
    return *(*uintptr)(p) == *(*uintptr)(q)
}
func memequal0(p, q unsafe.Pointer) bool {
    return true
}
func memequal8(p, q unsafe.Pointer) bool {
    return *(*int8)(p) == *(*int8)(q)
}
func memequal16(p, q unsafe.Pointer) bool {
    return *(*int16)(p) == *(*int16)(q)
}
func memequal32(p, q unsafe.Pointer) bool {
    return *(*int32)(p) == *(*int32)(q)
}
func memequal64(p, q unsafe.Pointer) bool {
    return *(*int64)(p) == *(*int64)(q)
}
func memequal128(p, q unsafe.Pointer) bool {
    return *(*[2]int64)(p) == *(*[2]int64)(q)
}
func f32equal(p, q unsafe.Pointer) bool {
    return *(*float32)(p) == *(*float32)(q)
}
func f64equal(p, q unsafe.Pointer) bool {
    return *(*float64)(p) == *(*float64)(q)
}
func c64equal(p, q unsafe.Pointer) bool {
    return *(*complex64)(p) == *(*complex64)(q)
}
func c128equal(p, q unsafe.Pointer) bool {
    return *(*complex128)(p) == *(*complex128)(q)
}
func strequal(p, q unsafe.Pointer) bool {
    return *(*string)(p) == *(*string)(q)
}
func interequal(p, q unsafe.Pointer) bool {
    x := *(*iface)(p)
    y := *(*iface)(q)
    return x.tab == y.tab && ifaceeq(x.tab, x.data, y.data)
}
func nilinterequal(p, q unsafe.Pointer) bool {
    x := *(*eface)(p)
    y := *(*eface)(q)
    return x._type == y._type && efaceeq(x._type, x.data, y.data)
}
func efaceeq(t *_type, x, y unsafe.Pointer) bool {
    if t == nil {
        return true
    }
    eq := t.Equal
    if eq == nil {
        panic(errorString("comparing uncomparable type " + t.Str_).Error())
    }
    if isDirectIface(t) {
        // Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof.
        // Maps and funcs are not comparable, so they can't reach here.
        // Ptrs, chans, and single-element items can be compared directly using ==.
        return x == y
    }
    return eq(x, y)
}
func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
    if tab == nil {
        return true
    }
    t := tab._type
    eq := t.Equal
    if eq == nil {
        panic(errorString("comparing uncomparable type " + t.Str_).Error())
    }
    if isDirectIface(t) {
        // See comment in efaceeq.
        return x == y
    }
    return eq(x, y)
}

// Testing adapters for hash quality tests (see hash_test.go)
func stringHash(s string, seed uintptr) uintptr {
    return strhash(noescape(unsafe.Pointer(&s)), seed)
}

func bytesHash(b []byte, seed uintptr) uintptr {
    s := (*slice)(unsafe.Pointer(&b))
    return memhash(s.array, seed, uintptr(s.len))
}

func int32Hash(i uint32, seed uintptr) uintptr {
    return memhash32(noescape(unsafe.Pointer(&i)), seed)
}

func int64Hash(i uint64, seed uintptr) uintptr {
    return memhash64(noescape(unsafe.Pointer(&i)), seed)
}

func efaceHash(i any, seed uintptr) uintptr {
    return nilinterhash(noescape(unsafe.Pointer(&i)), seed)
}

func ifaceHash(i interface {
    F()
}, seed uintptr) uintptr {
    return interhash(noescape(unsafe.Pointer(&i)), seed)
}

var hashkey [4]uintptr

// Note: These routines perform the read with a native endianness.
func readUnaligned32(p unsafe.Pointer) uint32 {
    q := (*[4]byte)(p)
    if goarch.BigEndian {
        return uint32(q[3]) | uint32(q[2])<<8 | uint32(q[1])<<16 | uint32(q[0])<<24
    }
    return uint32(q[0]) | uint32(q[1])<<8 | uint32(q[2])<<16 | uint32(q[3])<<24
}

func readUnaligned64(p unsafe.Pointer) uint64 {
    q := (*[8]byte)(p)
    if goarch.BigEndian {
        return uint64(q[7]) | uint64(q[6])<<8 | uint64(q[5])<<16 | uint64(q[4])<<24 |
            uint64(q[3])<<32 | uint64(q[2])<<40 | uint64(q[1])<<48 | uint64(q[0])<<56
    }
    return uint64(q[0]) | uint64(q[1])<<8 | uint64(q[2])<<16 | uint64(q[3])<<24 | uint64(q[4])<<32 | uint64(q[5])<<40 | uint64(q[6])<<48 | uint64(q[7])<<56
}
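The NaN note above has visible consequences at the language level; a minimal standalone sketch (a separate demo program, not part of this commit):

```go
package main

import "math"

func main() {
    // NaN != NaN, so each NaN key inserts a fresh, unreachable entry.
    // f64hash mixes fastrand() into the hash of a NaN so these entries
    // at least spread across buckets instead of forming one long chain.
    m := map[float64]int{}
    for i := 0; i < 3; i++ {
        m[math.NaN()] = i
    }
    println(len(m)) // 3
    _, ok := m[math.NaN()]
    println(ok) // false: NaN keys can never be looked up again
}
```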
compiler/internal/runtime/errors.go: 116 lines (Normal file)
@@ -0,0 +1,116 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// A boundsError represents an indexing or slicing operation gone wrong.
type boundsError struct {
    x int64
    y int
    // Values in an index or slice expression can be signed or unsigned.
    // That means we'd need 65 bits to encode all possible indexes, from -2^63 to 2^64-1.
    // Instead, we keep track of whether x should be interpreted as signed or unsigned.
    // y is known to be nonnegative and to fit in an int.
    signed bool
    code   boundsErrorCode
}

type boundsErrorCode uint8

const (
    boundsIndex boundsErrorCode = iota // s[x], 0 <= x < len(s) failed

    boundsSliceAlen // s[?:x], 0 <= x <= len(s) failed
    boundsSliceAcap // s[?:x], 0 <= x <= cap(s) failed
    boundsSliceB    // s[x:y], 0 <= x <= y failed (but boundsSliceA didn't happen)

    boundsSlice3Alen // s[?:?:x], 0 <= x <= len(s) failed
    boundsSlice3Acap // s[?:?:x], 0 <= x <= cap(s) failed
    boundsSlice3B    // s[?:x:y], 0 <= x <= y failed (but boundsSlice3A didn't happen)
    boundsSlice3C    // s[x:y:?], 0 <= x <= y failed (but boundsSlice3A/B didn't happen)

    boundsConvert // (*[x]T)(s), 0 <= x <= len(s) failed
    // Note: in the above, len(s) and cap(s) are stored in y
)

// boundsErrorFmts provide error text for various out-of-bounds panics.
// Note: if you change these strings, you should adjust the size of the buffer
// in boundsError.Error below as well.
var boundsErrorFmts = [...]string{
    boundsIndex:      "index out of range [%x] with length %y",
    boundsSliceAlen:  "slice bounds out of range [:%x] with length %y",
    boundsSliceAcap:  "slice bounds out of range [:%x] with capacity %y",
    boundsSliceB:     "slice bounds out of range [%x:%y]",
    boundsSlice3Alen: "slice bounds out of range [::%x] with length %y",
    boundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y",
    boundsSlice3B:    "slice bounds out of range [:%x:%y]",
    boundsSlice3C:    "slice bounds out of range [%x:%y:]",
    boundsConvert:    "cannot convert slice with length %y to array or pointer to array with length %x",
}

// boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y.
var boundsNegErrorFmts = [...]string{
    boundsIndex:      "index out of range [%x]",
    boundsSliceAlen:  "slice bounds out of range [:%x]",
    boundsSliceAcap:  "slice bounds out of range [:%x]",
    boundsSliceB:     "slice bounds out of range [%x:]",
    boundsSlice3Alen: "slice bounds out of range [::%x]",
    boundsSlice3Acap: "slice bounds out of range [::%x]",
    boundsSlice3B:    "slice bounds out of range [:%x:]",
    boundsSlice3C:    "slice bounds out of range [%x::]",
}

func (e boundsError) RuntimeError() {}

func appendIntStr(b []byte, v int64, signed bool) []byte {
    if signed && v < 0 {
        b = append(b, '-')
        v = -v
    }
    var buf [20]byte
    b = append(b, itoa(buf[:], uint64(v))...)
    return b
}

func (e boundsError) Error() string {
    fmt := boundsErrorFmts[e.code]
    if e.signed && e.x < 0 {
        fmt = boundsNegErrorFmts[e.code]
    }
    // max message length is 99: "runtime error: slice bounds out of range [::%x] with capacity %y"
    // x can be at most 20 characters. y can be at most 19.
    b := make([]byte, 0, 100)
    b = append(b, "runtime error: "...)
    for i := 0; i < len(fmt); i++ {
        c := fmt[i]
        if c != '%' {
            b = append(b, c)
            continue
        }
        i++
        switch fmt[i] {
        case 'x':
            b = appendIntStr(b, e.x, e.signed)
        case 'y':
            b = appendIntStr(b, int64(e.y), true)
        }
    }
    return string(b)
}

func itoa(buf []byte, val uint64) []byte {
    i := len(buf) - 1
    for val >= 10 {
        buf[i] = byte(val%10 + '0')
        i--
        val /= 10
    }
    buf[i] = byte(val + '0')
    return buf[i:]
}

// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func PanicSliceConvert(x int, y int) {
    panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}
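The format strings above are what surfaces in ordinary out-of-range panics; a small standalone sketch that triggers the boundsSliceB case (plain Go, independent of this runtime):

```go
package main

func main() {
    defer func() {
        // boundsError implements error, so recover() yields one.
        err := recover().(error)
        println(err.Error()) // runtime error: slice bounds out of range [5:3]
    }()
    s := []int{1, 2, 3}
    _ = s[5:] // low bound 5 exceeds high bound len(s) == 3
}
```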
compiler/internal/runtime/goarch/endian_big.go: 7 lines (Normal file)
@@ -0,0 +1,7 @@
//go:build ppc64 || s390x || mips || mips64
// +build ppc64 s390x mips mips64

package goarch

const BigEndian = true
const LittleEndian = false

compiler/internal/runtime/goarch/endian_little.go: 9 lines (Normal file)
@@ -0,0 +1,9 @@
//go:build 386 || amd64 || arm || arm64 || ppc64le || mips64le || mipsle || riscv64 || wasm
// +build 386 amd64 arm arm64 ppc64le mips64le mipsle riscv64 wasm

package goarch

const (
    BigEndian    = false
    LittleEndian = true
)
compiler/internal/runtime/goarch/goarch.go: 3 lines (Normal file)
@@ -0,0 +1,3 @@
package goarch

const PtrSize = 4 << (^uintptr(0) >> 63)
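The PtrSize expression deserves a gloss: `^uintptr(0) >> 63` is 1 exactly when uintptr is 64 bits wide, so the constant evaluates to 8 on 64-bit targets and 4 on 32-bit ones. A quick standalone check:

```go
package main

import "unsafe"

func main() {
    const PtrSize = 4 << (^uintptr(0) >> 63) // 8 on 64-bit, 4 on 32-bit
    println(uintptr(PtrSize) == unsafe.Sizeof(uintptr(0))) // true
}
```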
compiler/internal/runtime/hash32.go: 61 lines (Normal file)
@@ -0,0 +1,61 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Hashing algorithm inspired by
// wyhash: https://github.com/wangyi-fudan/wyhash/blob/ceb019b530e2c1c14d70b79bfa2bc49de7d95bc1/Modern%20Non-Cryptographic%20Hash%20Function%20and%20Pseudorandom%20Number%20Generator.pdf

//go:build 386 || arm || mips || mipsle

package runtime

import "unsafe"

func memhash32(p unsafe.Pointer, seed uintptr) uintptr {
    a, b := mix32(uint32(seed), uint32(4^hashkey[0]))
    t := readUnaligned32(p)
    a ^= t
    b ^= t
    a, b = mix32(a, b)
    a, b = mix32(a, b)
    return uintptr(a ^ b)
}

func memhash64(p unsafe.Pointer, seed uintptr) uintptr {
    a, b := mix32(uint32(seed), uint32(8^hashkey[0]))
    a ^= readUnaligned32(p)
    b ^= readUnaligned32(add(p, 4))
    a, b = mix32(a, b)
    a, b = mix32(a, b)
    return uintptr(a ^ b)
}

func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
    a, b := mix32(uint32(seed), uint32(s^hashkey[0]))
    if s == 0 {
        return uintptr(a ^ b)
    }
    for ; s > 8; s -= 8 {
        a ^= readUnaligned32(p)
        b ^= readUnaligned32(add(p, 4))
        a, b = mix32(a, b)
        p = add(p, 8)
    }
    if s >= 4 {
        a ^= readUnaligned32(p)
        b ^= readUnaligned32(add(p, s-4))
    } else {
        t := uint32(*(*byte)(p))
        t |= uint32(*(*byte)(add(p, s>>1))) << 8
        t |= uint32(*(*byte)(add(p, s-1))) << 16
        b ^= t
    }
    a, b = mix32(a, b)
    a, b = mix32(a, b)
    return uintptr(a ^ b)
}

func mix32(a, b uint32) (uint32, uint32) {
    c := uint64(a^uint32(hashkey[1])) * uint64(b^uint32(hashkey[2]))
    return uint32(c), uint32(c >> 32)
}
compiler/internal/runtime/hash64.go: 93 lines (Normal file)
@@ -0,0 +1,93 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Hashing algorithm inspired by
// wyhash: https://github.com/wangyi-fudan/wyhash

//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm

package runtime

import (
    "unsafe"

    "github.com/goplus/llgo/compiler/internal/runtime/math"
)

const (
    m1 = 0xa0761d6478bd642f
    m2 = 0xe7037ed1a0b428db
    m3 = 0x8ebc6af09c88c6e3
    m4 = 0x589965cc75374cc3
    m5 = 0x1d8e4e27c47d124f
)

func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
    var a, b uintptr
    seed ^= hashkey[0] ^ m1
    switch {
    case s == 0:
        return seed
    case s < 4:
        a = uintptr(*(*byte)(p))
        a |= uintptr(*(*byte)(add(p, s>>1))) << 8
        a |= uintptr(*(*byte)(add(p, s-1))) << 16
    case s == 4:
        a = r4(p)
        b = a
    case s < 8:
        a = r4(p)
        b = r4(add(p, s-4))
    case s == 8:
        a = r8(p)
        b = a
    case s <= 16:
        a = r8(p)
        b = r8(add(p, s-8))
    default:
        l := s
        if l > 48 {
            seed1 := seed
            seed2 := seed
            for ; l > 48; l -= 48 {
                seed = mix(r8(p)^m2, r8(add(p, 8))^seed)
                seed1 = mix(r8(add(p, 16))^m3, r8(add(p, 24))^seed1)
                seed2 = mix(r8(add(p, 32))^m4, r8(add(p, 40))^seed2)
                p = add(p, 48)
            }
            seed ^= seed1 ^ seed2
        }
        for ; l > 16; l -= 16 {
            seed = mix(r8(p)^m2, r8(add(p, 8))^seed)
            p = add(p, 16)
        }
        a = r8(add(p, l-16))
        b = r8(add(p, l-8))
    }

    return mix(m5^s, mix(a^m2, b^seed))
}

func memhash32(p unsafe.Pointer, seed uintptr) uintptr {
    a := r4(p)
    return mix(m5^4, mix(a^m2, a^seed^hashkey[0]^m1))
}

func memhash64(p unsafe.Pointer, seed uintptr) uintptr {
    a := r8(p)
    return mix(m5^8, mix(a^m2, a^seed^hashkey[0]^m1))
}

func mix(a, b uintptr) uintptr {
    hi, lo := math.Mul64(uint64(a), uint64(b))
    return uintptr(hi ^ lo)
}

func r4(p unsafe.Pointer) uintptr {
    return uintptr(readUnaligned32(p))
}

func r8(p unsafe.Pointer) uintptr {
    return uintptr(readUnaligned64(p))
}
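Both hash variants share one diffusion primitive: multiply two words and fold the double-width product back with xor (mix32 on 32-bit targets, mix here). A standalone sketch of the same step using the standard library, which math.Mul64 above copies:

```go
package main

import "math/bits"

// mix folds the full 128-bit product of a and b into a single word,
// the wyhash-style diffusion step used by memhash above.
func mix(a, b uint64) uint64 {
    hi, lo := bits.Mul64(a, b)
    return hi ^ lo
}

func main() {
    println(mix(0xa0761d6478bd642f, 0xe7037ed1a0b428db) != 0) // true
}
```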
compiler/internal/runtime/map.go: 1727 lines (Normal file)
File diff suppressed because it is too large
compiler/internal/runtime/math/math.go: 36 lines (Normal file)
@@ -0,0 +1,36 @@
package math

import "github.com/goplus/llgo/compiler/internal/runtime/goarch"

const MaxUintptr = ^uintptr(0)

// MulUintptr returns a * b and whether the multiplication overflowed.
// On supported platforms this is an intrinsic lowered by the compiler.
func MulUintptr(a, b uintptr) (uintptr, bool) {
    if a|b < 1<<(4*goarch.PtrSize) || a == 0 {
        return a * b, false
    }
    overflow := b > MaxUintptr/a
    return a * b, overflow
}

// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y
// with the product bits' upper half returned in hi and the lower
// half returned in lo.
// This is a copy of math/bits.Mul64.
// On supported platforms this is an intrinsic lowered by the compiler.
func Mul64(x, y uint64) (hi, lo uint64) {
    const mask32 = 1<<32 - 1
    x0 := x & mask32
    x1 := x >> 32
    y0 := y & mask32
    y1 := y >> 32
    w0 := x0 * y0
    t := x1*y0 + w0>>32
    w1 := t & mask32
    w2 := t >> 32
    w1 += x0 * y1
    hi = x1*y1 + w2 + w1>>32
    lo = x * y
    return
}
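MulUintptr's fast path relies on a simple fact: if both operands fit in half a word, their product cannot overflow; otherwise overflow is exactly the condition `b > MaxUintptr/a`, which is equivalent to the high word of the full product being nonzero. A quick cross-check against the standard library:

```go
package main

import "math/bits"

func main() {
    // Overflow iff the high word of the full product is nonzero;
    // b > MaxUintptr/a detects the same thing without a wide multiply.
    hi, _ := bits.Mul64(1<<40, 1<<40) // 2^80 does not fit in 64 bits
    println(hi != 0)                  // true
}
```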
compiler/internal/runtime/mbarrier.go: 332 lines (Normal file)
@@ -0,0 +1,332 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: write barriers.
//
// For the concurrent garbage collector, the Go compiler implements
// updates to pointer-valued fields that may be in heap objects by
// emitting calls to write barriers. The main write barrier for
// individual pointer writes is gcWriteBarrier and is implemented in
// assembly. This file contains write barrier entry points for bulk
// operations. See also mwbbuf.go.

package runtime

import (
    "unsafe"

    "github.com/goplus/llgo/c"
)

// Go uses a hybrid barrier that combines a Yuasa-style deletion
// barrier—which shades the object whose reference is being
// overwritten—with a Dijkstra insertion barrier—which shades the object
// whose reference is being written. The insertion part of the barrier
// is necessary while the calling goroutine's stack is grey. In
// pseudocode, the barrier is:
//
//     writePointer(slot, ptr):
//         shade(*slot)
//         if current stack is grey:
//             shade(ptr)
//         *slot = ptr
//
// slot is the destination in Go code.
// ptr is the value that goes into the slot in Go code.
//
// Shade indicates that it has seen a white pointer by adding the referent
// to wbuf as well as marking it.
//
// The two shades and the condition work together to prevent a mutator
// from hiding an object from the garbage collector:
//
// 1. shade(*slot) prevents a mutator from hiding an object by moving
// the sole pointer to it from the heap to its stack. If it attempts
// to unlink an object from the heap, this will shade it.
//
// 2. shade(ptr) prevents a mutator from hiding an object by moving
// the sole pointer to it from its stack into a black object in the
// heap. If it attempts to install the pointer into a black object,
// this will shade it.
//
// 3. Once a goroutine's stack is black, the shade(ptr) becomes
// unnecessary. shade(ptr) prevents hiding an object by moving it from
// the stack to the heap, but this requires first having a pointer
// hidden on the stack. Immediately after a stack is scanned, it only
// points to shaded objects, so it's not hiding anything, and the
// shade(*slot) prevents it from hiding any other pointers on its
// stack.
//
// For a detailed description of this barrier and proof of
// correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
//
//
//
// Dealing with memory ordering:
//
// Both the Yuasa and Dijkstra barriers can be made conditional on the
// color of the object containing the slot. We chose not to make these
// conditional because the cost of ensuring that the object holding
// the slot doesn't concurrently change color without the mutator
// noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
// a slot and then loads the slot's mark bit while the GC thread
// writes to the slot's mark bit and then as part of scanning reads
// the slot.
//
//     Initially both [slot] and [slotmark] are 0 (nil)
//     Mutator thread          GC thread
//     st [slot], ptr          st [slotmark], 1
//
//     ld r1, [slotmark]       ld r2, [slot]
//
// Without an expensive memory barrier between the st and the ld, the final
// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
// example of what can happen when loads are allowed to be reordered with older
// stores (avoiding such reorderings lies at the heart of the classic
// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
// barriers, which will slow down both the mutator and the GC, we always grey
// the ptr object regardless of the slot's color.
//
// Another place where we intentionally omit memory barriers is when
// accessing mheap_.arena_used to check if a pointer points into the
// heap. On relaxed memory machines, it's possible for a mutator to
// extend the size of the heap by updating arena_used, allocate an
// object from this new region, and publish a pointer to that object,
// but for tracing running on another processor to observe the pointer
// but use the old value of arena_used. In this case, tracing will not
// mark the object, even though it's reachable. However, the mutator
// is guaranteed to execute a write barrier when it publishes the
// pointer, so it will take care of marking the object. A general
// consequence of this is that the garbage collector may cache the
// value of mheap_.arena_used. (See issue #9984.)
//
//
// Stack writes:
//
// The compiler omits write barriers for writes to the current frame,
// but if a stack pointer has been passed down the call stack, the
// compiler will generate a write barrier for writes through that
// pointer (because it doesn't know it's not a heap pointer).
//
//
// Global writes:
//
// The Go garbage collector requires write barriers when heap pointers
// are stored in globals. Many garbage collectors ignore writes to
// globals and instead pick up global -> heap pointers during
// termination. This increases pause time, so we instead rely on write
// barriers for writes to globals so that we don't have to rescan
// globals during mark termination.
//
//
// Publication ordering:
//
// The write barrier is *pre-publication*, meaning that the write
// barrier happens prior to the *slot = ptr write that may make ptr
// reachable by some goroutine that currently cannot reach it.
//
//
// Signal handler pointer writes:
//
// In general, the signal handler cannot safely invoke the write
// barrier because it may run without a P or even during the write
// barrier.
//
// There is exactly one exception: profbuf.go omits a barrier during
// signal handler profile logging. That's safe only because of the
// deletion barrier. See profbuf.go for a detailed argument. If we
// remove the deletion barrier, we'll have to work out a new way to
// handle the profile logging.

// typedmemmove copies a value of type typ to dst from src.
// Must be nosplit, see #16026.
//
// TODO: Perfect for go:nosplitrec since we can't have a safe point
// anywhere in the bulk barrier or memmove.
func Typedmemmove(typ *Type, dst, src unsafe.Pointer) {
    if dst == src {
        return
    }
    // There's a race here: if some other goroutine can write to
    // src, it may change some pointer in src after we've
    // performed the write barrier but before we perform the
    // memory copy. This is safe because the write performed by that
    // other goroutine must also be accompanied by a write
    // barrier, so at worst we've unnecessarily greyed the old
    // pointer that was in src.
    c.Memmove(dst, src, typ.Size_)
}

/*
// wbZero performs the write barrier operations necessary before
// zeroing a region of memory at address dst of type typ.
// Does not actually do the zeroing.
//
//go:nowritebarrierrec
//go:nosplit
func wbZero(typ *_type, dst unsafe.Pointer) {
    bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes)
}

// wbMove performs the write barrier operations necessary before
// copying a region of memory from src to dst of type typ.
// Does not actually do the copying.
//
//go:nowritebarrierrec
//go:nosplit
func wbMove(typ *_type, dst, src unsafe.Pointer) {
    bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
}

//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
    if raceenabled {
        raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
        raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
    }
    if msanenabled {
        msanwrite(dst, typ.Size_)
        msanread(src, typ.Size_)
    }
    if asanenabled {
        asanwrite(dst, typ.Size_)
        asanread(src, typ.Size_)
    }
    typedmemmove(typ, dst, src)
}

//go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove
func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
    reflect_typedmemmove(typ, dst, src)
}

// reflectcallmove is invoked by reflectcall to copy the return values
// out of the stack and into the heap, invoking the necessary write
// barriers. dst, src, and size describe the return value area to
// copy. typ describes the entire frame (not just the return values).
// typ may be nil, which indicates write barriers are not needed.
//
// It must be nosplit and must only call nosplit functions because the
// stack map of reflectcall is wrong.
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
    if writeBarrier.needed && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
        bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
    }
    memmove(dst, src, size)

    // Move pointers returned in registers to a place where the GC can see them.
    for i := range regs.Ints {
        if regs.ReturnIsPtr.Get(i) {
            regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
        }
    }
}

//go:nosplit
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
    n := dstLen
    if n > srcLen {
        n = srcLen
    }
    if n == 0 {
        return 0
    }

    // The compiler emits calls to typedslicecopy before
    // instrumentation runs, so unlike the other copying and
    // assignment operations, it's not instrumented in the calling
    // code and needs its own instrumentation.
    if raceenabled {
        callerpc := getcallerpc()
        pc := abi.FuncPCABIInternal(slicecopy)
        racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
        racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
    }
    if msanenabled {
        msanwrite(dstPtr, uintptr(n)*typ.Size_)
        msanread(srcPtr, uintptr(n)*typ.Size_)
    }
    if asanenabled {
        asanwrite(dstPtr, uintptr(n)*typ.Size_)
        asanread(srcPtr, uintptr(n)*typ.Size_)
    }

    if goexperiment.CgoCheck2 {
        cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
    }

    if dstPtr == srcPtr {
        return n
    }

    // Note: No point in checking typ.PtrBytes here:
    // compiler only emits calls to typedslicecopy for types with pointers,
    // and growslice and reflect_typedslicecopy check for pointers
    // before calling typedslicecopy.
    size := uintptr(n) * typ.Size_
    if writeBarrier.needed {
        pwsize := size - typ.Size_ + typ.PtrBytes
        bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
    }
    // See typedmemmove for a discussion of the race between the
    // barrier and memmove.
    memmove(dstPtr, srcPtr, size)
    return n
}

//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
    if elemType.PtrBytes == 0 {
        return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
    }
    return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
}
*/

// typedmemclr clears the typed memory at ptr with type typ. The
// memory at ptr must already be initialized (and hence in type-safe
// state). If the memory is being initialized for the first time, see
// memclrNoHeapPointers.
//
// If the caller knows that typ has pointers, it can alternatively
// call memclrHasPointers.
//
// TODO: A "go:nosplitrec" annotation would be perfect for this.
func Typedmemclr(typ *Type, ptr unsafe.Pointer) {
    c.Memset(ptr, 0, typ.Size_)
}

/*
//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
    if writeBarrier.needed && typ.PtrBytes != 0 {
        bulkBarrierPreWrite(uintptr(ptr), 0, size)
    }
    memclrNoHeapPointers(ptr, size)
}

//go:linkname reflect_typedarrayclear reflect.typedarrayclear
func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
    size := typ.Size_ * uintptr(len)
    if writeBarrier.needed && typ.PtrBytes != 0 {
        bulkBarrierPreWrite(uintptr(ptr), 0, size)
    }
    memclrNoHeapPointers(ptr, size)
}

// memclrHasPointers clears n bytes of typed memory starting at ptr.
// The caller must ensure that the type of the object at ptr has
// pointers, usually by checking typ.PtrBytes. However, ptr
// does not have to point to the start of the allocation.
//
//go:nosplit
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
    bulkBarrierPreWrite(uintptr(ptr), 0, n)
    memclrNoHeapPointers(ptr, n)
}
*/
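The pseudocode at the top of this file translates almost directly into Go; a minimal sketch, with shade and stackIsGrey as hypothetical stand-ins for GC internals that are not part of this commit:

```go
package main

import "unsafe"

// Hypothetical stand-ins for GC internals, for illustration only.
func shade(p unsafe.Pointer) { _ = p }    // mark the referent grey
func stackIsGrey() bool      { return true }

// writePointer sketches the hybrid barrier from the comment above:
// a Yuasa-style deletion barrier plus a Dijkstra-style insertion
// barrier while the goroutine's stack is still grey.
func writePointer(slot *unsafe.Pointer, ptr unsafe.Pointer) {
    shade(*slot) // deletion barrier: shade the overwritten pointer
    if stackIsGrey() {
        shade(ptr) // insertion barrier: shade the installed pointer
    }
    *slot = ptr
}

func main() {
    var x, y int
    p := unsafe.Pointer(&y)
    writePointer(&p, unsafe.Pointer(&x))
    println(p == unsafe.Pointer(&x)) // true
}
```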
compiler/internal/runtime/panic.go: 1412 lines (Normal file)
File diff suppressed because it is too large
compiler/internal/runtime/stubs.go: 134 lines (Normal file)
@@ -0,0 +1,134 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
    "unsafe"

    "github.com/goplus/llgo/c/sync/atomic"
    "github.com/goplus/llgo/c/time"
    "github.com/goplus/llgo/compiler/internal/runtime/math"
)

//go:linkname fastrand C.rand
func fastrand() uint32

//go:linkname srand C.srand
func srand(uint32)

func fastrand64() uint64 {
    n := uint64(fastrand())
    n += 0xa0761d6478bd642f
    hi, lo := math.Mul64(n, n^0xe7037ed1a0b428db)
    return hi ^ lo
}

func init() {
    srand(uint32(time.Time(nil)))
    hashkey[0] = uintptr(fastrand()) | 1
    hashkey[1] = uintptr(fastrand()) | 1
    hashkey[2] = uintptr(fastrand()) | 1
    hashkey[3] = uintptr(fastrand()) | 1
}

/* TODO(xsw):
func fastrand() uint32 {
    mp := getg().m
    // Implement wyrand: https://github.com/wangyi-fudan/wyhash
    // Only the platform that math.Mul64 can be lowered
    // by the compiler should be in this list.
    if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
        goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
        goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 {
        mp.fastrand += 0xa0761d6478bd642f
        hi, lo := math.Mul64(mp.fastrand, mp.fastrand^0xe7037ed1a0b428db)
        return uint32(hi ^ lo)
    }

    // Implement xorshift64+: 2 32-bit xorshift sequences added together.
    // Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
    // Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
    // This generator passes the SmallCrush suite, part of TestU01 framework:
    // http://simul.iro.umontreal.ca/testu01/tu01.html
    t := (*[2]uint32)(unsafe.Pointer(&mp.fastrand))
    s1, s0 := t[0], t[1]
    s1 ^= s1 << 17
    s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
    t[0], t[1] = s0, s1
    return s0 + s1
}
*/

//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
    return unsafe.Pointer(uintptr(p) + x)
}

// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
    return AllocZ(typ.Size_)
}

// TODO
func roundupsize(size uintptr) uintptr {
    // if size < _MaxSmallSize {
    //     if size <= smallSizeMax-8 {
    //         return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])
    //     } else {
    //         return uintptr(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]])
    //     }
    // }
    // if size+_PageSize < size {
    //     return size
    // }
    // return alignUp(size, _PageSize)
    return size
}

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
    if n == 1 {
        return AllocZ(typ.Size_)
    }
    mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
    if overflow || mem > maxAlloc || n < 0 {
        panic(plainError("runtime: allocation size out of range"))
    }
    return AllocZ(mem)
}

const (
    // _64bit = 1 on 64-bit systems, 0 on 32-bit systems
    _64bit = 1 << (^uintptr(0) >> 63) / 2
    heapAddrBits = (_64bit)*48 + (1-_64bit)*(32)
    maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
)

func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
    // bulkBarrierPreWrite(uintptr(ptr), 0, n)
    // memclrNoHeapPointers(ptr, n)
}

func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
}

func fatal(s string) {
    print("fatal error: ", s, "\n")
}

func throw(s string) {
    print("fatal error: ", s, "\n")
}

func atomicOr8(ptr *uint8, v uint8) uint8 {
    return (uint8)(atomic.Or((*uint)(unsafe.Pointer(ptr)), uint(v)))
}

func noescape(p unsafe.Pointer) unsafe.Pointer {
    x := uintptr(p)
    return unsafe.Pointer(x ^ 0)
}
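The heap constants reuse the goarch.PtrSize bit trick: since `<<` and `/` share precedence and associate left, `1 << (^uintptr(0) >> 63) / 2` parses as `(1 << shift) / 2`, giving 1 on 64-bit targets and 0 on 32-bit ones, which then selects 48 or 32 heap address bits. A quick standalone check of the arithmetic:

```go
package main

func main() {
    const _64bit = 1 << (^uintptr(0) >> 63) / 2       // (1<<1)/2 = 1 on 64-bit
    const heapAddrBits = (_64bit)*48 + (1-_64bit)*32
    println(_64bit, heapAddrBits) // 1 48 on a 64-bit target
}
```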
compiler/internal/runtime/type.go: 31 lines (Normal file)
@@ -0,0 +1,31 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime type representation.

package runtime

import (
    "github.com/goplus/llgo/compiler/internal/abi"
)

type _type = abi.Type

/*
type maptype = abi.MapType

type arraytype = abi.ArrayType

type chantype = abi.ChanType

type slicetype = abi.SliceType

type functype = abi.FuncType

type ptrtype = abi.PtrType

type name = abi.Name

type structtype = abi.StructType
*/
compiler/internal/runtime/utf8.go: 132 lines (Normal file)
@@ -0,0 +1,132 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// Numbers fundamental to the encoding.
const (
    runeError = '\uFFFD'     // the "error" Rune or "Unicode replacement character"
    runeSelf  = 0x80         // characters below runeSelf are represented as themselves in a single byte.
    maxRune   = '\U0010FFFF' // Maximum valid Unicode code point.
)

// Code points in the surrogate range are not valid for UTF-8.
const (
    surrogateMin = 0xD800
    surrogateMax = 0xDFFF
)

const (
    t1 = 0x00 // 0000 0000
    tx = 0x80 // 1000 0000
    t2 = 0xC0 // 1100 0000
    t3 = 0xE0 // 1110 0000
    t4 = 0xF0 // 1111 0000
    t5 = 0xF8 // 1111 1000

    maskx = 0x3F // 0011 1111
    mask2 = 0x1F // 0001 1111
    mask3 = 0x0F // 0000 1111
    mask4 = 0x07 // 0000 0111

    rune1Max = 1<<7 - 1
    rune2Max = 1<<11 - 1
    rune3Max = 1<<16 - 1

    // The default lowest and highest continuation byte.
    locb = 0x80 // 1000 0000
    hicb = 0xBF // 1011 1111
)

// countrunes returns the number of runes in s.
// func countrunes(s string) int {
//     n := 0
//     for range s {
//         n++
//     }
//     return n
// }

// decoderune returns the non-ASCII rune at the start of
// s[k:] and the index after the rune in s.
//
// decoderune assumes that the caller has checked that
// the rune to be decoded is a non-ASCII rune.
//
// If the string appears to be incomplete or decoding problems
// are encountered, (runeError, k + 1) is returned to ensure
// progress when decoderune is used to iterate over a string.
func decoderune(s string, k int) (r rune, pos int) {
    pos = k

    if k >= len(s) {
        return runeError, k + 1
    }

    s = s[k:]

    switch {
    case t2 <= s[0] && s[0] < t3:
        // 0080-07FF two byte sequence
        if len(s) > 1 && (locb <= s[1] && s[1] <= hicb) {
            r = rune(s[0]&mask2)<<6 | rune(s[1]&maskx)
            pos += 2
            if rune1Max < r {
                return
            }
        }
    case t3 <= s[0] && s[0] < t4:
        // 0800-FFFF three byte sequence
        if len(s) > 2 && (locb <= s[1] && s[1] <= hicb) && (locb <= s[2] && s[2] <= hicb) {
            r = rune(s[0]&mask3)<<12 | rune(s[1]&maskx)<<6 | rune(s[2]&maskx)
            pos += 3
            if rune2Max < r && !(surrogateMin <= r && r <= surrogateMax) {
                return
            }
        }
    case t4 <= s[0] && s[0] < t5:
        // 10000-1FFFFF four byte sequence
        if len(s) > 3 && (locb <= s[1] && s[1] <= hicb) && (locb <= s[2] && s[2] <= hicb) && (locb <= s[3] && s[3] <= hicb) {
            r = rune(s[0]&mask4)<<18 | rune(s[1]&maskx)<<12 | rune(s[2]&maskx)<<6 | rune(s[3]&maskx)
            pos += 4
            if rune3Max < r && r <= maxRune {
                return
            }
        }
    }

    return runeError, k + 1
}

// encoderune writes into p (which must be large enough) the UTF-8 encoding of the rune.
// It returns the number of bytes written.
func encoderune(p []byte, r rune) int {
    // Negative values are erroneous. Making it unsigned addresses the problem.
    switch i := uint32(r); {
    case i <= rune1Max:
        p[0] = byte(r)
        return 1
    case i <= rune2Max:
        _ = p[1] // eliminate bounds checks
        p[0] = t2 | byte(r>>6)
        p[1] = tx | byte(r)&maskx
        return 2
    case i > maxRune, surrogateMin <= i && i <= surrogateMax:
        r = runeError
        fallthrough
    case i <= rune3Max:
        _ = p[2] // eliminate bounds checks
        p[0] = t3 | byte(r>>12)
        p[1] = tx | byte(r>>6)&maskx
        p[2] = tx | byte(r)&maskx
        return 3
    default:
        _ = p[3] // eliminate bounds checks
        p[0] = t4 | byte(r>>18)
        p[1] = tx | byte(r>>12)&maskx
        p[2] = tx | byte(r>>6)&maskx
        p[3] = tx | byte(r)&maskx
        return 4
    }
}
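encoderune mirrors unicode/utf8.EncodeRune, so the standard library can be used to sanity-check the byte layout it produces (t3 lead byte plus tx continuation bytes for a three-byte rune):

```go
package main

import "unicode/utf8"

func main() {
    var p [4]byte
    n := utf8.EncodeRune(p[:], '€') // U+20AC: t3 lead, two tx continuations
    println(n, p[0], p[1], p[2])    // 3 226 130 172
}
```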
compiler/internal/runtime/z_cgo.go: 46 lines (Normal file)
@@ -0,0 +1,46 @@
/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
    "unsafe"

    "github.com/goplus/llgo/c"
)

func CString(s string) *int8 {
    p := c.Malloc(uintptr(len(s)) + 1)
    return CStrCopy(p, *(*String)(unsafe.Pointer(&s)))
}

func CBytes(b []byte) *int8 {
    p := c.Malloc(uintptr(len(b)))
    c.Memcpy(p, unsafe.Pointer(&b[0]), uintptr(len(b)))
    return (*int8)(p)
}

func GoString(p *int8) string {
    return GoStringN(p, int(c.Strlen(p)))
}

func GoStringN(p *int8, n int) string {
    return string((*[1 << 30]byte)(unsafe.Pointer(p))[:n:n])
}

func GoBytes(p *int8, n int) []byte {
    return (*[1 << 30]byte)(unsafe.Pointer(p))[:n:n]
}
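GoStringN and GoBytes view the C buffer through a huge fake array type; since Go 1.17 the same reinterpretation is spelled with unsafe.Slice. A minimal sketch of the equivalence, using a local helper rather than llgo's API:

```go
package main

import "unsafe"

// goStringN is a local sketch: view n bytes at p as a Go string,
// equivalent to the (*[1 << 30]byte)(unsafe.Pointer(p))[:n:n] form above.
func goStringN(p *byte, n int) string {
    return string(unsafe.Slice(p, n))
}

func main() {
    buf := []byte{'h', 'i', 0} // NUL-terminated, C-style
    println(goStringN(&buf[0], 2)) // hi
}
```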
compiler/internal/runtime/z_chan.go: 331 lines (Normal file)
@@ -0,0 +1,331 @@
/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
    "unsafe"

    "github.com/goplus/llgo/c"
    "github.com/goplus/llgo/c/pthread/sync"
)

// -----------------------------------------------------------------------------

const (
    chanNoSendRecv = 0
    chanHasRecv    = 1
)

type Chan struct {
    mutex sync.Mutex
    cond  sync.Cond
    data  unsafe.Pointer
    getp  int
    len   int
    cap   int
    sops  []*selectOp
    sends uint16
    close bool
}

func NewChan(eltSize, cap int) *Chan {
    ret := new(Chan)
    if cap > 0 {
        ret.data = AllocU(uintptr(cap * eltSize))
        ret.cap = cap
    }
    ret.cond.Init(nil)
    return ret
}

func ChanLen(p *Chan) (n int) {
    if p == nil {
        return 0
    }
    p.mutex.Lock()
    n = p.len
    p.mutex.Unlock()
    return
}

func ChanCap(p *Chan) int {
    if p == nil {
        return 0
    }
    return p.cap
}

func notifyOps(p *Chan) {
    for _, sop := range p.sops {
        sop.notify()
    }
}

func ChanClose(p *Chan) {
    p.mutex.Lock()
    p.close = true
    notifyOps(p)
    p.mutex.Unlock()
    p.cond.Broadcast()
}

func ChanTrySend(p *Chan, v unsafe.Pointer, eltSize int) bool {
    n := p.cap
    p.mutex.Lock()
    if n == 0 {
        if p.getp != chanHasRecv || p.close {
            p.mutex.Unlock()
            return false
        }
        if p.data != nil {
            c.Memcpy(p.data, v, uintptr(eltSize))
        }
        p.getp = chanNoSendRecv
    } else {
        if p.len == n || p.close {
            p.mutex.Unlock()
            return false
        }
        off := (p.getp + p.len) % n
        c.Memcpy(c.Advance(p.data, off*eltSize), v, uintptr(eltSize))
        p.len++
    }
    notifyOps(p)
    p.mutex.Unlock()
    p.cond.Broadcast()
    return true
}

func ChanSend(p *Chan, v unsafe.Pointer, eltSize int) bool {
    n := p.cap
    p.mutex.Lock()
    if n == 0 {
        for p.getp != chanHasRecv && !p.close {
            p.sends++
            p.cond.Wait(&p.mutex)
            p.sends--
        }
        if p.close {
            p.mutex.Unlock()
            return false
        }
        if p.data != nil {
            c.Memcpy(p.data, v, uintptr(eltSize))
        }
        p.getp = chanNoSendRecv
    } else {
        for p.len == n {
            p.cond.Wait(&p.mutex)
        }
        if p.close {
            p.mutex.Unlock()
            return false
        }
        off := (p.getp + p.len) % n
        c.Memcpy(c.Advance(p.data, off*eltSize), v, uintptr(eltSize))
        p.len++
    }
    notifyOps(p)
    p.mutex.Unlock()
    p.cond.Broadcast()
    return true
}

func ChanTryRecv(p *Chan, v unsafe.Pointer, eltSize int) (recvOK bool, tryOK bool) {
    n := p.cap
    p.mutex.Lock()
    if n == 0 {
        if p.sends == 0 || p.getp == chanHasRecv || p.close {
            tryOK = p.close
            p.mutex.Unlock()
            return
        }
        p.getp = chanHasRecv
        p.data = v
    } else {
        if p.len == 0 {
            tryOK = p.close
            p.mutex.Unlock()
            return
        }
        if v != nil {
            c.Memcpy(v, c.Advance(p.data, p.getp*eltSize), uintptr(eltSize))
        }
        p.getp = (p.getp + 1) % n
        p.len--
    }
    notifyOps(p)
    p.mutex.Unlock()
    p.cond.Broadcast()
    if n == 0 {
        p.mutex.Lock()
        for p.getp == chanHasRecv && !p.close {
            p.cond.Wait(&p.mutex)
        }
        recvOK = !p.close
        tryOK = recvOK
        p.mutex.Unlock()
    } else {
        recvOK, tryOK = true, true
    }
    return
}

func ChanRecv(p *Chan, v unsafe.Pointer, eltSize int) (recvOK bool) {
    n := p.cap
    p.mutex.Lock()
    if n == 0 {
        for p.getp == chanHasRecv && !p.close {
            p.cond.Wait(&p.mutex)
        }
        if p.close {
            p.mutex.Unlock()
            return false
        }
        p.getp = chanHasRecv
        p.data = v
    } else {
        for p.len == 0 {
            if p.close {
                p.mutex.Unlock()
                return false
            }
            p.cond.Wait(&p.mutex)
        }
        if v != nil {
            c.Memcpy(v, c.Advance(p.data, p.getp*eltSize), uintptr(eltSize))
        }
        p.getp = (p.getp + 1) % n
        p.len--
    }
    notifyOps(p)
    p.mutex.Unlock()
    p.cond.Broadcast()
    if n == 0 {
        p.mutex.Lock()
        for p.getp == chanHasRecv && !p.close {
            p.cond.Wait(&p.mutex)
        }
        recvOK = !p.close
        p.mutex.Unlock()
    } else {
        recvOK = true
    }
    return
}

// -----------------------------------------------------------------------------

type selectOp struct {
    mutex sync.Mutex
    cond  sync.Cond
    sem   bool
}

func (p *selectOp) init() {
    p.mutex.Init(nil)
    p.cond.Init(nil)
    p.sem = false
}

func (p *selectOp) end() {
    p.mutex.Destroy()
    p.cond.Destroy()
}

func (p *selectOp) notify() {
    p.mutex.Lock()
    p.sem = true
    p.mutex.Unlock()
    p.cond.Signal()
}

func (p *selectOp) wait() {
    p.mutex.Lock()
    if !p.sem {
        p.cond.Wait(&p.mutex)
    }
    p.sem = false
    p.mutex.Unlock()
}

// ChanOp represents a channel operation.
type ChanOp struct {
    C *Chan

    Val  unsafe.Pointer
    Size int32

    Send bool
}

// TrySelect executes a non-blocking select operation.
func TrySelect(ops ...ChanOp) (isel int, recvOK, tryOK bool) {
    for isel = range ops {
        op := ops[isel]
        if op.Send {
            if tryOK = ChanTrySend(op.C, op.Val, int(op.Size)); tryOK {
                return
            }
        } else {
            if recvOK, tryOK = ChanTryRecv(op.C, op.Val, int(op.Size)); tryOK {
                return
            }
        }
    }
    return
}

// Select executes a blocking select operation.
func Select(ops ...ChanOp) (isel int, recvOK bool) {
    selOp := new(selectOp) // TODO(xsw): use c.AllocaNew[selectOp]()
    selOp.init()
    for _, op := range ops {
        prepareSelect(op.C, selOp)
    }
    var tryOK bool
    for {
        if isel, recvOK, tryOK = TrySelect(ops...); tryOK {
            break
        }
        selOp.wait()
    }
    for _, op := range ops {
        endSelect(op.C, selOp)
    }
    selOp.end()
    return
}

func prepareSelect(c *Chan, selOp *selectOp) {
    c.mutex.Lock()
    c.sops = append(c.sops, selOp)
    c.mutex.Unlock()
}

func endSelect(c *Chan, selOp *selectOp) {
    c.mutex.Lock()
    for i, op := range c.sops {
        if op == selOp {
            c.sops = append(c.sops[:i], c.sops[i+1:]...)
            break
        }
    }
    c.mutex.Unlock()
}

// -----------------------------------------------------------------------------
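A buffered Chan is a ring buffer guarded by one mutex and one condition variable: getp is the read index and (getp+len)%cap the write index. A standalone sketch of just that index arithmetic, without the locking:

```go
package main

// ring mimics the index bookkeeping in ChanSend/ChanRecv above.
type ring struct {
    buf        []int
    getp, size int
}

func (r *ring) put(v int) { r.buf[(r.getp+r.size)%cap(r.buf)] = v; r.size++ }

func (r *ring) get() int {
    v := r.buf[r.getp]
    r.getp = (r.getp + 1) % cap(r.buf)
    r.size--
    return v
}

func main() {
    r := &ring{buf: make([]int, 3)}
    for i := 1; i <= 3; i++ {
        r.put(i)
    }
    println(r.get(), r.get(), r.get()) // 1 2 3
}
```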
156
compiler/internal/runtime/z_error.go
Normal file
156
compiler/internal/runtime/z_error.go
Normal file
@@ -0,0 +1,156 @@
|
||||
/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/c/bitcast"
	"github.com/goplus/llgo/compiler/internal/abi"
)

type errorString string

func (e errorString) RuntimeError() {}

func (e errorString) Error() string {
	return "runtime error: " + string(e)
}

type plainError string

func (e plainError) Error() string {
	return string(e)
}

func AssertRuntimeError(b bool, msg string) {
	if b {
		panic(errorString(msg).Error())
	}
}

func AssertNegativeShift(b bool) {
	if b {
		panic(errorString("negative shift amount").Error())
	}
}

func AssertIndexRange(b bool) {
	if b {
		panic(errorString("index out of range").Error())
	}
}
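
// Illustrative sketch (not part of this commit): the Assert* helpers take the
// already-evaluated failure condition, so a compiler lowering of `v := a[i]`
// might emit something like:
//
//	AssertIndexRange(i < 0 || i >= len(a))
//	v := a[i]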

// printany prints an argument passed to panic.
// If panic is called with a value that has a String or Error method,
// it has already been converted into a string by preprintpanics.
func printany(i any) {
	switch v := i.(type) {
	case nil:
		print("nil")
	case bool:
		print(v)
	case int:
		print(v)
	case int8:
		print(v)
	case int16:
		print(v)
	case int32:
		print(v)
	case int64:
		print(v)
	case uint:
		print(v)
	case uint8:
		print(v)
	case uint16:
		print(v)
	case uint32:
		print(v)
	case uint64:
		print(v)
	case uintptr:
		print(v)
	case float32:
		print(v)
	case float64:
		print(v)
	case complex64:
		print(v)
	case complex128:
		print(v)
	case string:
		print(v)
	case error:
		print(v.Error())
	case interface{ String() string }:
		print(v.String())
	default:
		printanycustomtype(i)
	}
}

func efaceOf(ep *any) *eface {
	return (*eface)(unsafe.Pointer(ep))
}

func printanycustomtype(i any) {
	e := efaceOf(&i)
	typestring := e._type.String()

	switch e._type.Kind() {
	case abi.String:
		print(typestring, `("`, *(*string)(e.data), `")`)
	case abi.Bool:
		if isDirectIface(e._type) {
			print(typestring, "(", uintptr(e.data) != 0, ")")
		} else {
			print(typestring, "(", *(*bool)(e.data), ")")
		}
	case abi.Int, abi.Int8, abi.Int16, abi.Int32, abi.Int64:
		if isDirectIface(e._type) {
			print(typestring, "(", int64(uintptr(e.data)), ")")
		} else {
			print(typestring, "(", *(*int64)(e.data), ")")
		}
	case abi.Uint, abi.Uint8, abi.Uint16, abi.Uint32, abi.Uint64, abi.Uintptr:
		if isDirectIface(e._type) {
			print(typestring, "(", uint64(uintptr(e.data)), ")")
		} else {
			print(typestring, "(", *(*uint64)(e.data), ")")
		}
	case abi.Float32:
		if isDirectIface(e._type) {
			print(typestring, "(", bitcast.ToFloat32(uintptr(e.data)), ")")
		} else {
			print(typestring, "(", *(*float32)(e.data), ")")
		}
	case abi.Float64:
		if isDirectIface(e._type) {
			print(typestring, "(", bitcast.ToFloat64(uintptr(e.data)), ")")
		} else {
			print(typestring, "(", *(*float64)(e.data), ")")
		}
	case abi.Complex64:
		println(typestring, *(*complex64)(e.data))
	case abi.Complex128:
		println(typestring, *(*complex128)(e.data))
	default:
		print("(", typestring, ") ", e.data)
	}
}
635
compiler/internal/runtime/z_face.go
Normal file
@@ -0,0 +1,635 @@
/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/c"
	"github.com/goplus/llgo/compiler/internal/abi"
)

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type interfacetype = abi.InterfaceType

// layout of Itab known to compilers
// allocated in non-garbage-collected memory
// Needs to be in sync with
// ../cmd/compile/internal/reflectdata/reflect.go:/^func.WriteTabs.
type itab struct {
	inter *interfacetype
	_type *_type
	hash  uint32 // copy of _type.hash. Used for type switches.
	_     [4]byte
	fun   [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
}

// -----------------------------------------------------------------------------

type (
	Eface = eface
	Iface = iface
	Itab  = itab
)

type Imethod = abi.Imethod
type Method = abi.Method
type FuncType = abi.FuncType
type InterfaceType = abi.InterfaceType

// ToEface converts an iface to an eface.
func ToEface(i Iface) Eface {
	return Eface{i.tab._type, i.data}
}

// -----------------------------------------------------------------------------

const (
	typeHdrSize          = unsafe.Sizeof(abi.Type{})
	arrayTypeHdrSize     = unsafe.Sizeof(abi.ArrayType{})
	chanTypeHdrSize      = unsafe.Sizeof(abi.ChanType{})
	funcTypeHdrSize      = unsafe.Sizeof(abi.FuncType{})
	interfaceTypeHdrSize = unsafe.Sizeof(abi.InterfaceType{})
	mapTypeHdrSize       = unsafe.Sizeof(abi.MapType{})
	ptrTypeHdrSize       = unsafe.Sizeof(abi.PtrType{})
	sliceTypeHdrSize     = unsafe.Sizeof(abi.SliceType{})
	structTypeHdrSize    = unsafe.Sizeof(abi.StructType{})
	uncommonTypeHdrSize  = unsafe.Sizeof(abi.UncommonType{})
	methodSize           = unsafe.Sizeof(abi.Method{})
	pointerSize          = unsafe.Sizeof(uintptr(0))
	itabHdrSize          = unsafe.Sizeof(itab{}) - pointerSize
)

func hdrSizeOf(kind abi.Kind) uintptr {
	switch kind {
	case abi.Array:
		return arrayTypeHdrSize
	case abi.Chan:
		return chanTypeHdrSize
	case abi.Func:
		return funcTypeHdrSize
	case abi.Interface:
		return interfaceTypeHdrSize
	case abi.Map:
		return mapTypeHdrSize
	case abi.Pointer:
		return ptrTypeHdrSize
	case abi.Slice:
		return sliceTypeHdrSize
	case abi.Struct:
		return structTypeHdrSize
	default:
		return typeHdrSize
	}
}

// NewNamed returns an uninitialized named type.
func NewNamed(pkgPath string, name string, kind abi.Kind, size uintptr, methods, ptrMethods int) *Type {
	if pkgPath != "" {
		name = pkgName(pkgPath) + "." + name
	}
	if t := rtypeList.findNamed(pkgPath, name); t != nil {
		return t
	}
	ret := allocUncommonType(kind, size, methods, abi.TFlagUninited|abi.TFlagNamed|abi.TFlagUncommon, pkgPath)
	ret.Str_ = name
	ret.Hash = 9157 + hashString(pkgPath) + hashString(name)
	if ptrMethods == 0 {
		ret.PtrToThis_ = newPointer(ret)
	} else {
		ret.PtrToThis_ = allocUncommonType(abi.Pointer, pointerSize, ptrMethods, abi.TFlagUncommon, pkgPath)
		setPointer((*abi.PtrType)(unsafe.Pointer(ret.PtrToThis_)), ret)
	}
	rtypeList.addType(ret)
	return ret
}

func lastSlash(s string) int {
	i := len(s) - 1
	for i >= 0 && s[i] != '/' {
		i--
	}
	return i
}

func pkgName(path string) string {
	i := lastSlash(path)
	return path[i+1:]
}

// InitNamed initializes an uninitialized named type.
func InitNamed(ret *Type, underlying *Type, methods, ptrMethods []Method) {
	// skip initialized
	if ret.TFlag&abi.TFlagUninited == 0 {
		return
	}
	setUnderlying(ret, underlying)
	if len(methods) > 0 {
		setUncommon(ret, methods)
	}
	if len(ptrMethods) > 0 {
		setUncommon(ret.PtrToThis_, ptrMethods)
	}
}
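
// Illustrative sketch (not part of this commit): named types are built in two
// phases, which lets recursive types refer to themselves. NewNamed first
// reserves an uninitialized *Type; InitNamed later fills in the underlying
// type and methods once every referenced type exists. The pkg path, name and
// underlying type below are made up for illustration:
//
//	t := NewNamed("github.com/user/pkg", "Node", abi.Struct, size, 0, 0)
//	// ... build the underlying struct type, possibly mentioning t ...
//	InitNamed(t, underlying, nil, nil)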

func allocUncommonType(kind abi.Kind, size uintptr, methods int, tflag abi.TFlag, pkgPath string) *Type {
	baseSize := hdrSizeOf(kind)
	allocSize := baseSize + uncommonTypeHdrSize + uintptr(methods)*methodSize
	ret := (*Type)(AllocU(allocSize))
	ret.Size_ = size
	ret.Kind_ = uint8(kind)
	ret.TFlag = tflag
	uncommon := (*abi.UncommonType)(c.Advance(unsafe.Pointer(ret), int(baseSize)))
	uncommon.PkgPath_ = pkgPath
	uncommon.Moff = uint32(uncommonTypeHdrSize)
	return ret
}

func setUnderlying(ret *Type, underlying *Type) {
	str := ret.Str_
	ptr := ret.PtrToThis_

	baseSize := hdrSizeOf(ret.Kind())
	c.Memcpy(unsafe.Pointer(ret), unsafe.Pointer(underlying), baseSize)

	ret.Str_ = str
	ret.PtrToThis_ = ptr
	ret.TFlag = underlying.TFlag | abi.TFlagNamed | abi.TFlagUncommon
}

func setUncommon(ret *Type, methods []Method) {
	ptr := unsafe.Pointer(ret)
	baseSize := hdrSizeOf(ret.Kind())

	n := len(methods)
	xcount := uint16(0)
	for _, m := range methods {
		if !m.Exported() {
			break
		}
		xcount++
	}
	uncommon := (*abi.UncommonType)(c.Advance(ptr, int(baseSize)))
	uncommon.Mcount = uint16(n)
	uncommon.Xcount = xcount
	uncommon.Moff = uint32(uncommonTypeHdrSize)

	extraOff := int(baseSize + uncommonTypeHdrSize)
	data := (*abi.Method)(c.Advance(ptr, extraOff))
	copy(unsafe.Slice(data, n), methods)
}

// Func returns a function type.
func Func(in, out []*Type, variadic bool) *FuncType {
	if t := rtypeList.findFunc(in, out, variadic); t != nil {
		return t
	}
	ret := &FuncType{
		Type: Type{
			Size_:       2 * unsafe.Sizeof(uintptr(0)),
			PtrBytes:    2 * pointerSize,
			Align_:      uint8(pointerAlign),
			FieldAlign_: uint8(pointerAlign),
			Kind_:       uint8(abi.Func),
		},
		In:  in,
		Out: out,
	}
	var hash uint32 = 9091
	if variadic {
		hash *= 8863
		ret.TFlag |= abi.TFlagVariadic
	}
	hash += 3*hashTuple(in) + 5*hashTuple(out)
	ret.Hash = hash
	ret.Str_ = funcStr(ret)
	rtypeList.addType(&ret.Type)
	return ret
}

// NewNamedInterface returns an interface type.
// Do not call NewNamed for a named interface type.
func NewNamedInterface(pkgPath, name string) *InterfaceType {
	if pkgPath != "" {
		name = pkgName(pkgPath) + "." + name
	}
	if t := rtypeList.findNamed(pkgPath, name); t != nil {
		return t.InterfaceType()
	}
	ret := &struct {
		abi.InterfaceType
		u abi.UncommonType
	}{
		abi.InterfaceType{
			Type: Type{
				Size_:       unsafe.Sizeof(eface{}),
				PtrBytes:    2 * pointerSize,
				Hash:        9157 + hashString(pkgPath) + hashString(name),
				Align_:      uint8(pointerAlign),
				FieldAlign_: uint8(pointerAlign),
				Kind_:       uint8(abi.Interface),
				Str_:        name,
				TFlag:       abi.TFlagNamed | abi.TFlagUncommon,
			},
			PkgPath_: pkgPath,
		},
		abi.UncommonType{
			PkgPath_: pkgPath,
		},
	}
	rtypeList.addType(&ret.Type)
	return &ret.InterfaceType
}

func InitNamedInterface(ret *InterfaceType, methods []Imethod) {
	ret.Methods = methods
	if len(methods) == 0 {
		ret.Equal = nilinterequal
	} else {
		ret.Equal = interequal
	}
}

func Interface(pkgPath string, methods []Imethod) *InterfaceType {
	if t := rtypeList.findInterface(pkgPath, methods); t != nil {
		return t
	}
	ret := &abi.InterfaceType{
		Type: Type{
			Size_:       unsafe.Sizeof(eface{}),
			PtrBytes:    2 * pointerSize,
			Align_:      uint8(pointerAlign),
			FieldAlign_: uint8(pointerAlign),
			Kind_:       uint8(abi.Interface),
		},
		PkgPath_: pkgPath,
		Methods:  methods,
	}
	if len(methods) == 0 {
		ret.Equal = nilinterequal
	} else {
		ret.Equal = interequal
	}
	ret.Str_ = interfaceStr(ret)
	var hash uint32 = 9103
	// Hash methods.
	for _, m := range methods {
		// Use shallow hash on method signature to
		// avoid anonymous interface cycles.
		hash += 3*hashString(m.Name()) + 5*shallowHash(&m.Typ_.Type)
	}
	ret.Hash = hash
	rtypeList.addType(&ret.Type)
	return ret
}

// NewItab returns a new itab.
func NewItab(inter *InterfaceType, typ *Type) *Itab {
	if typ == nil {
		return nil
	}
	n := len(inter.Methods)
	size := itabHdrSize + uintptr(n)*pointerSize
	ptr := AllocU(size)

	ret := (*itab)(ptr)
	ret.inter = inter
	ret._type = typ
	ret.hash = typ.Hash

	u := typ.Uncommon()
	if u == nil {
		ret.fun[0] = 0
	} else {
		data := (*uintptr)(c.Advance(ptr, int(itabHdrSize)))
		mthds := methods(u, inter.PkgPath_)
		for i, m := range inter.Methods {
			fn := findMethod(mthds, m)
			if fn == nil {
				ret.fun[0] = 0
				break
			}
			*c.Advance(data, i) = uintptr(fn)
		}
	}
	return ret
}

func findMethod(mthds []abi.Method, im abi.Imethod) abi.Text {
	imName := im.Name_
	for _, m := range mthds {
		mName := m.Name_
		if mName >= imName {
			if mName == imName && m.Mtyp_ == im.Typ_ {
				return m.Ifn_
			}
			break
		}
	}
	return nil
}

func methods(u *abi.UncommonType, from string) []abi.Method {
	if u.PkgPath_ == from {
		return u.Methods()
	}
	return u.ExportedMethods()
}

func IfaceType(i iface) *abi.Type {
	if i.tab == nil {
		return nil
	}
	return i.tab._type
}

func IfacePtrData(i iface) unsafe.Pointer {
	if i.tab == nil {
		panic(errorString("invalid memory address or nil pointer dereference").Error())
	}
	switch i.tab._type.Kind() {
	case abi.Bool, abi.Int, abi.Int8, abi.Int16, abi.Int32, abi.Int64,
		abi.Uint, abi.Uint8, abi.Uint16, abi.Uint32, abi.Uint64, abi.Uintptr,
		abi.Float32, abi.Float64, abi.Array, abi.Struct:
		if isDirectIface(i.tab._type) {
			return unsafe.Pointer(&i.data)
		}
	}
	return i.data
}

// Implements reports whether the type V implements the interface type T.
func Implements(T, V *abi.Type) bool {
	if V == nil {
		return false
	}
	if T.Kind() != abi.Interface {
		return false
	}
	t := (*abi.InterfaceType)(unsafe.Pointer(T))

	if len(t.Methods) == 0 {
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.go.
	if V.Kind() == abi.Interface {
		v := (*abi.InterfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.Methods); j++ {
			tm := &t.Methods[i]
			vm := &v.Methods[j]
			if vm.Name_ == tm.Name_ && vm.Typ_ == tm.Typ_ {
				if i++; i >= len(t.Methods) {
					return true
				}
			}
		}
		return false
	}

	v := V.Uncommon()
	if v == nil {
		return false
	}
	i := 0
	vmethods := v.Methods()
	for j := 0; j < int(v.Mcount); j++ {
		tm := &t.Methods[i]
		vm := vmethods[j]
		if vm.Name_ == tm.Name_ && vm.Mtyp_ == tm.Typ_ {
			if i++; i >= len(t.Methods) {
				return true
			}
		}
	}
	return false
}
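
// Illustrative note (not part of this commit): because both method tables are
// sorted by name, the scan above is a merge, not a nested loop. For
// T = {Close; Read; Write} and V = {Close; Open; Read; Seek; Write}, the
// index i advances only on a match and reaches len(t.Methods) after visiting
// each of V's methods at most once.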

func EfaceEqual(v, u eface) bool {
	if v._type == nil || u._type == nil {
		return v._type == u._type
	}
	if v._type != u._type {
		return false
	}
	if isDirectIface(v._type) {
		return v.data == u.data
	}
	if equal := v._type.Equal; equal != nil {
		return equal(v.data, u.data)
	}
	panic(errorString("comparing uncomparable type " + v._type.String()).Error())
}

func (v eface) Kind() abi.Kind {
	if v._type == nil {
		return abi.Invalid
	}
	return v._type.Kind()
}

func (v eface) Elem() eface {
	switch v.Kind() {
	case abi.Interface:
		var i any
		tt := (*abi.InterfaceType)(unsafe.Pointer(v._type))
		if len(tt.Methods) == 0 {
			i = *(*any)(v.data)
		} else {
			i = (any)(*(*interface {
				M()
			})(v.data))
		}
		return *(*eface)(unsafe.Pointer(&i))
	case abi.Pointer:
		ptr := v.data
		if isDirectIface(v._type) {
			ptr = *(*unsafe.Pointer)(ptr)
		}
		if ptr == nil {
			return eface{}
		}
		return eface{v._type.Elem(), ptr}
	}
	panic("invalid eface elem")
}

func SetDirectIface(t *abi.Type) {
	t.Kind_ |= abi.KindDirectIface
}

func isDirectIface(t *_type) bool {
	return t.Kind_&abi.KindDirectIface != 0
}

func interfaceStr(ft *abi.InterfaceType) string {
	repr := make([]byte, 0, 64)
	repr = append(repr, "interface {"...)
	for i, t := range ft.Methods {
		if i > 0 {
			repr = append(repr, ';')
		}
		repr = append(repr, ' ')
		repr = append(repr, t.Name_...)
		repr = append(repr, t.Typ_.String()[4:]...)
	}
	if len(ft.Methods) > 0 {
		repr = append(repr, ' ')
	}
	repr = append(repr, '}')
	return string(repr)
}

func funcStr(ft *abi.FuncType) string {
	repr := make([]byte, 0, 64)
	repr = append(repr, "func("...)
	for i, t := range ft.In {
		if i > 0 {
			repr = append(repr, ", "...)
		}
		if ft.Variadic() && i == len(ft.In)-1 {
			repr = append(repr, "..."...)
			repr = append(repr, (*abi.SliceType)(unsafe.Pointer(t)).Elem.String()...)
		} else {
			repr = append(repr, t.String()...)
		}
	}
	repr = append(repr, ')')
	out := ft.Out
	if len(out) == 1 {
		repr = append(repr, ' ')
	} else if len(out) > 1 {
		repr = append(repr, " ("...)
	}
	for i, t := range out {
		if i > 0 {
			repr = append(repr, ", "...)
		}
		repr = append(repr, t.String()...)
	}
	if len(out) > 1 {
		repr = append(repr, ')')
	}
	return string(repr)
}

func hashTuple(tuple []*Type) uint32 {
	// See go/types.identicalTypes for rationale.
	n := len(tuple)
	hash := 9137 + 2*uint32(n)
	for i := 0; i < n; i++ {
		hash += 3 * tuple[i].Hash
	}
	return hash
}

// shallowHash computes a hash of t without looking at any of its
// element Types, to avoid potential anonymous cycles in the types of
// interface methods.
//
// When an unnamed non-empty interface type appears anywhere among the
// arguments or results of an interface method, there is a potential
// for endless recursion. Consider:
//
//	type X interface { m() []*interface { X } }
//
// The problem is that the Methods of the interface in m's result type
// include m itself; there is no mention of the named type X that
// might help us break the cycle.
// (See comment in go/types.identical, case *Interface, for more.)
func shallowHash(t *abi.Type) uint32 {
	// t is the type of an interface method (Signature),
	// its params or results (Tuples), or their immediate
	// elements (mostly Slice, Pointer, Basic, Named),
	// so there's no need to optimize anything else.

	if t.HasName() {
		return 9157 + hashString(t.Uncommon().PkgPath_) + hashString(t.Str_)
	}

	switch t.Kind() {
	case abi.Bool, abi.Int, abi.Int8, abi.Int16, abi.Int32, abi.Int64,
		abi.Uint, abi.Uint8, abi.Uint16, abi.Uint32, abi.Uint64, abi.Uintptr,
		abi.Float32, abi.Float64, abi.Complex64, abi.Complex128, abi.String, abi.UnsafePointer:
		return 45212177 * uint32(t.Kind())

	case abi.Func:
		t := t.FuncType()
		var hash uint32 = 604171
		if t.Variadic() {
			hash *= 971767
		}
		// The Signature/Tuple recursion is always finite
		// and invariably shallow.
		return hash + 1062599*shallowHashTuple(t.In) + 1282529*shallowHashTuple(t.Out)

	case abi.Array:
		return 1524181 + 2*uint32(t.ArrayType().Len)

	case abi.Slice:
		return 2690201

	case abi.Struct:
		return 3326489

	case abi.Pointer:
		return 4393139

	case abi.Interface:
		return 2124679 // no recursion here

	case abi.Map:
		return 9109

	case abi.Chan:
		return 9127
	}

	panic("shallowHash:" + t.String())
}

func shallowHashTuple(tuple []*Type) uint32 {
	n := len(tuple)
	hash := 9137 + 2*uint32(n)
	for i := 0; i < n; i++ {
		hash += 53471161 * shallowHash(tuple[i])
	}
	return hash
}

// -----------------------------------------------------------------------------
38
compiler/internal/runtime/z_gc.go
Normal file
@@ -0,0 +1,38 @@
//go:build !nogc
// +build !nogc

/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/c"
	"github.com/goplus/llgo/c/bdwgc"
)

// AllocU allocates uninitialized memory.
func AllocU(size uintptr) unsafe.Pointer {
	return bdwgc.Malloc(size)
}

// AllocZ allocates zero-initialized memory.
func AllocZ(size uintptr) unsafe.Pointer {
	ret := bdwgc.Malloc(size)
	return c.Memset(ret, 0, size)
}
91
compiler/internal/runtime/z_map.go
Normal file
@@ -0,0 +1,91 @@
/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/compiler/internal/abi"
)

// Map represents a Go map.
type Map = hmap
type maptype = abi.MapType
type arraytype = abi.ArrayType
type structtype = abi.StructType

type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}

func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	Typedmemmove(typ, dst, src)
}

// MakeSmallMap creates a new small map.
func MakeSmallMap() *Map {
	return makemap_small()
}

func MakeMap(t *maptype, hint int) *hmap {
	return makemap(t, hint, nil)
}

func MapAssign(t *maptype, h *Map, key unsafe.Pointer) unsafe.Pointer {
	return mapassign(t, h, key)
}

func MapAccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	return mapaccess1(t, h, key)
}

func MapAccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	return mapaccess2(t, h, key)
}

func MapDelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

func MapClear(t *maptype, h *hmap) {
	mapclear(t, h)
}

func NewMapIter(t *maptype, h *hmap) *hiter {
	var it hiter
	mapiterinit(t, h, &it)
	return &it
}

func MapIterNext(it *hiter) (ok bool, k unsafe.Pointer, v unsafe.Pointer) {
	if it.key == nil {
		return
	}
	ok = true
	k, v = it.key, it.elem
	mapiternext(it)
	return
}
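
// Illustrative sketch (not part of this commit): draining a map through the
// iterator API, assuming t is the map's *maptype and h a *Map built with
// MakeMap. Keys and values come back as unsafe.Pointers into the map:
//
//	it := NewMapIter(t, h)
//	for {
//		ok, k, v := MapIterNext(it)
//		if !ok {
//			break
//		}
//		_, _ = k, v // interpret via t.Key / t.Elem
//	}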

func MapLen(h *Map) int {
	if h == nil {
		return 0
	}
	return h.count
}
37
compiler/internal/runtime/z_nogc.go
Normal file
@@ -0,0 +1,37 @@
//go:build nogc
// +build nogc

/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/c"
)

// AllocU allocates uninitialized memory.
func AllocU(size uintptr) unsafe.Pointer {
	return c.Malloc(size)
}

// AllocZ allocates zero-initialized memory.
func AllocZ(size uintptr) unsafe.Pointer {
	ret := c.Malloc(size)
	return c.Memset(ret, 0, size)
}
90
compiler/internal/runtime/z_print.go
Normal file
@@ -0,0 +1,90 @@
/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/c"
)

func boolCStr(v bool) *c.Char {
	if v {
		return c.Str("true")
	}
	return c.Str("false")
}

func PrintBool(v bool) {
	c.Fprintf(c.Stderr, boolCStr(v))
}

func PrintByte(v byte) {
	c.Fputc(c.Int(v), c.Stderr)
}

func PrintFloat(v float64) {
	switch {
	case v != v:
		c.Fprintf(c.Stderr, c.Str("NaN"))
		return
	case v+v == v && v != 0:
		if v > 0 {
			c.Fprintf(c.Stderr, c.Str("+Inf"))
		} else {
			c.Fprintf(c.Stderr, c.Str("-Inf"))
		}
		return
	}
	c.Fprintf(c.Stderr, c.Str("%+e"), v)
}

func PrintComplex(v complex128) {
	print("(", real(v), imag(v), "i)")
}

func PrintUint(v uint64) {
	c.Fprintf(c.Stderr, c.Str("%llu"), v)
}

func PrintInt(v int64) {
	c.Fprintf(c.Stderr, c.Str("%lld"), v)
}

func PrintHex(v uint64) {
	c.Fprintf(c.Stderr, c.Str("%llx"), v)
}

func PrintPointer(p unsafe.Pointer) {
	c.Fprintf(c.Stderr, c.Str("%p"), p)
}

func PrintString(s String) {
	c.Fwrite(s.data, 1, uintptr(s.len), c.Stderr)
}

func PrintSlice(s Slice) {
	print("[", s.len, "/", s.cap, "]", s.data)
}

func PrintEface(e Eface) {
	print("(", e._type, ",", e.data, ")")
}

func PrintIface(i Iface) {
	print("(", i.tab, ",", i.data, ")")
}
136
compiler/internal/runtime/z_rt.go
Normal file
@@ -0,0 +1,136 @@
/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/c"
	"github.com/goplus/llgo/c/debug"
	"github.com/goplus/llgo/c/pthread"
	"github.com/goplus/llgo/c/signal"
	"github.com/goplus/llgo/c/syscall"
)

// -----------------------------------------------------------------------------

// Defer represents defer statements in a function.
type Defer struct {
	Addr unsafe.Pointer // sigjmpbuf
	Bits uintptr
	Link *Defer
	Reth unsafe.Pointer // block address after Rethrow
	Rund unsafe.Pointer // block address after RunDefers
}

// Recover recovers a panic.
func Recover() (ret any) {
	ptr := excepKey.Get()
	if ptr != nil {
		excepKey.Set(nil)
		ret = *(*any)(ptr)
		c.Free(ptr)
	}
	return
}

// Panic panics with a value.
func Panic(v any) {
	ptr := c.Malloc(unsafe.Sizeof(v))
	*(*any)(ptr) = v
	excepKey.Set(ptr)

	Rethrow((*Defer)(c.GoDeferData()))
}
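
// Illustrative sketch (not part of this commit): Panic stores the value in
// the thread-local excepKey slot and long-jumps through the Defer chain via
// Rethrow; Recover fetches and clears that slot. A lowered
// `defer func() { recover() }()` therefore reduces to roughly:
//
//	Panic(v)         // unwinds to the innermost Defer's sigjmpbuf
//	ret := Recover() // in the defer body: returns v and empties the slot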

// Rethrow rethrows a panic.
func Rethrow(link *Defer) {
	if ptr := excepKey.Get(); ptr != nil {
		if link == nil {
			TracePanic(*(*any)(ptr))
			debug.StackTrace(0, func(fr *debug.Frame) bool {
				var info debug.Info
				debug.Addrinfo(unsafe.Pointer(fr.PC), &info)
				c.Fprintf(c.Stderr, c.Str("[0x%08X %s+0x%x, SP = 0x%x]\n"), fr.PC, fr.Name, fr.Offset, fr.SP)
				return true
			})

			c.Free(ptr)
			c.Exit(2)
		} else {
			c.Siglongjmp(link.Addr, 1)
		}
	}
}

var (
	excepKey pthread.Key
)

func init() {
	excepKey.Create(nil)
}

// -----------------------------------------------------------------------------

// TracePanic prints a panic message.
func TracePanic(v any) {
	print("panic: ")
	printany(v)
	println("\n")
}

/*
func stringTracef(fp c.FilePtr, format *c.Char, s String) {
	cs := c.Alloca(uintptr(s.len) + 1)
	c.Fprintf(fp, format, CStrCopy(cs, s))
}
*/

// -----------------------------------------------------------------------------

// New allocates memory and initializes it to zero.
func New(t *Type) unsafe.Pointer {
	return AllocZ(t.Size_)
}

// NewArray allocates memory for an array and initializes it to zero.
func NewArray(t *Type, n int) unsafe.Pointer {
	return AllocZ(uintptr(n) * t.Size_)
}

// -----------------------------------------------------------------------------

// TODO(xsw): check this
// must match declarations in runtime/map.go.
const MaxZero = 1024

var ZeroVal [MaxZero]byte

func init() {
	signal.Signal(c.Int(syscall.SIGSEGV), func(v c.Int) {
		switch syscall.Signal(v) {
		case syscall.SIGSEGV:
			panic(errorString("invalid memory address or nil pointer dereference"))
		default:
			var buf [20]byte
			panic(errorString("unexpected signal value: " + string(itoa(buf[:], uint64(v)))))
		}
	})
}

// -----------------------------------------------------------------------------
149
compiler/internal/runtime/z_slice.go
Normal file
@@ -0,0 +1,149 @@
/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/c"
	"github.com/goplus/llgo/compiler/internal/abi"
	"github.com/goplus/llgo/compiler/internal/runtime/math"
)

// -----------------------------------------------------------------------------

// Slice is the runtime representation of a slice.
type Slice struct {
	data unsafe.Pointer
	len  int
	cap  int
}

func NewSlice3(base unsafe.Pointer, eltSize, cap, i, j, k int) (s Slice) {
	if i < 0 || j < i || k < j || k > cap {
		panic("slice index out of bounds")
	}
	s.len = j - i
	s.cap = k - i
	if k-i > 0 {
		s.data = c.Advance(base, i*eltSize)
	} else {
		s.data = base
	}
	return
}

// SliceAppend appends element data and returns the updated slice.
func SliceAppend(src Slice, data unsafe.Pointer, num, etSize int) Slice {
	if etSize == 0 {
		return src
	}
	oldLen := src.len
	src = GrowSlice(src, num, etSize)
	c.Memcpy(c.Advance(src.data, oldLen*etSize), data, uintptr(num*etSize))
	return src
}
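
// Illustrative sketch (not part of this commit): appending two ints to a
// Slice of ints, assuming s was produced by MakeSlice. The new elements are
// passed as a pointer to a contiguous buffer plus a count and element size:
//
//	buf := [2]int{7, 9}
//	s = SliceAppend(s, unsafe.Pointer(&buf[0]), 2, int(unsafe.Sizeof(buf[0])))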

// GrowSlice grows a slice and returns the grown slice.
func GrowSlice(src Slice, num, etSize int) Slice {
	oldLen := src.len
	newLen := oldLen + num
	if newLen > src.cap {
		newCap := nextslicecap(newLen, src.cap)
		p := AllocZ(uintptr(newCap * etSize))
		if oldLen != 0 {
			c.Memcpy(p, src.data, uintptr(oldLen*etSize))
		}
		src.data = p
		src.cap = newCap
	}
	src.len = newLen
	return src
}

// nextslicecap computes the next appropriate slice length.
func nextslicecap(newLen, oldCap int) int {
	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		return newLen
	}

	const threshold = 256
	if oldCap < threshold {
		return doublecap
	}
	for {
		// Transition from growing 2x for small slices
		// to growing 1.25x for large slices. This formula
		// gives a smooth-ish transition between the two.
		newcap += (newcap + 3*threshold) >> 2

		// We need to check `newcap >= newLen` and whether `newcap` overflowed.
		// newLen is guaranteed to be larger than zero, hence
		// when newcap overflows then `uint(newcap) > uint(newLen)`.
		// This allows to check for both with the same comparison.
		if uint(newcap) >= uint(newLen) {
			break
		}
	}

	// Set newcap to the requested cap when
	// the newcap calculation overflowed.
	if newcap <= 0 {
		return newLen
	}
	return newcap
}
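
// Worked values for nextslicecap (illustrative, derived from the code above):
// below the 256-element threshold the capacity doubles (64 -> 128 -> 256);
// above it each step adds (newcap + 3*256)/4, e.g. 256 -> 512 -> 832 -> 1232,
// converging toward 1.25x growth for large capacities.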

// SliceCopy copies data into a slice and returns the number of elements copied.
func SliceCopy(dst Slice, data unsafe.Pointer, num int, etSize int) int {
	n := dst.len
	if n > num {
		n = num
	}
	if n > 0 {
		c.Memmove(dst.data, data, uintptr(n*etSize))
	}
	return n
}

func MakeSlice(len, cap int, etSize int) Slice {
	mem, overflow := math.MulUintptr(uintptr(etSize), uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		mem, overflow := math.MulUintptr(uintptr(etSize), uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}
	return Slice{AllocZ(mem), len, cap}
}

func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}

func SliceClear(t *abi.SliceType, s Slice) {
	c.Memset(s.data, 0, uintptr(s.len)*t.Elem.Size())
}

// -----------------------------------------------------------------------------
195
compiler/internal/runtime/z_string.go
Normal file
@@ -0,0 +1,195 @@
/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/c"
)

// -----------------------------------------------------------------------------

// String is the runtime representation of a string.
// It cannot be used safely or portably and its representation may
// change in a later release.
//
// Unlike reflect.StringHeader, its Data field is sufficient to guarantee the
// data it references will not be garbage collected.
type String struct {
	data unsafe.Pointer
	len  int
}

// StringCat concatenates two strings.
func StringCat(a, b String) String {
	n := a.len + b.len
	dest := AllocU(uintptr(n))
	c.Memcpy(dest, a.data, uintptr(a.len))
	c.Memcpy(c.Advance(dest, a.len), b.data, uintptr(b.len))
	return String{dest, n}
}

// -----------------------------------------------------------------------------

// CStrCopy copies a Go string to a C string buffer and returns it.
func CStrCopy(dest unsafe.Pointer, s String) *int8 {
	n := s.len
	c.Memcpy(dest, s.data, uintptr(n))
	*(*int8)(c.Advance(dest, n)) = 0
	return (*int8)(dest)
}

func CStrDup(s String) *int8 {
	dest := AllocU(uintptr(s.len + 1))
	return CStrCopy(dest, s)
}

func StringSlice(base String, i, j int) String {
	if i < 0 || j < i || j > base.len {
		panic("string slice index out of bounds")
	}
	if i < base.len {
		return String{c.Advance(base.data, i), j - i}
	}
	return String{nil, 0}
}

type StringIter struct {
	s   string
	pos int
}

func NewStringIter(s string) *StringIter {
	return &StringIter{s, 0}
}

func StringIterNext(it *StringIter) (ok bool, k int, v rune) {
	if it.pos >= len(it.s) {
		return false, 0, 0
	}
	k = it.pos
	if c := it.s[it.pos]; c < runeSelf {
		it.pos++
		v = rune(c)
	} else {
		v, it.pos = decoderune(it.s, it.pos)
	}
	ok = true
	return
}
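
// Illustrative sketch (not part of this commit): how a lowered
// `for i, r := range s` might drive the iterator, decoding UTF-8 lazily:
//
//	it := NewStringIter(s)
//	for {
//		ok, i, r := StringIterNext(it)
//		if !ok {
//			break
//		}
//		_, _ = i, r
//	}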

func StringToBytes(s String) []byte {
	if s.len == 0 {
		return nil
	}
	data := make([]byte, s.len)
	c.Memcpy(unsafe.Pointer(&data[0]), s.data, uintptr(s.len))
	return data
}

func StringToRunes(s string) []rune {
	if len(s) == 0 {
		return nil
	}
	data := make([]rune, len(s))
	var index uint
	for i := 0; i < len(s); {
		if c := s[i]; c < runeSelf {
			data[index] = rune(c)
			i++
		} else {
			data[index], i = decoderune(s, i)
		}
		index++
	}
	return data[:index:index]
}

func StringFromCStr(cstr *int8) (s String) {
	return StringFrom(unsafe.Pointer(cstr), int(c.Strlen(cstr)))
}

func StringFromBytes(b Slice) (s String) {
	return StringFrom(b.data, b.len)
}

func StringFrom(data unsafe.Pointer, n int) (s String) {
	if n == 0 {
		return
	}
	s.len = n
	s.data = AllocU(uintptr(n))
	c.Memcpy(s.data, data, uintptr(n))
	return
}

func StringFromRunes(rs []rune) (s String) {
	if len(rs) == 0 {
		return
	}
	data := make([]byte, len(rs)*4)
	var index int
	for _, r := range rs {
		n := encoderune(data[index:], r)
		index += n
	}
	s.len = index
	s.data = unsafe.Pointer(&data[0])
	return
}

func StringFromRune(r rune) (s String) {
	var buf [4]byte
	n := encoderune(buf[:], r)
	s.len = n
	s.data = unsafe.Pointer(&buf[0])
	return
}

func StringEqual(x, y String) bool {
	if x.len != y.len {
		return false
	}
	if x.data != y.data {
		for i := 0; i < x.len; i++ {
			if *(*byte)(c.Advance(x.data, i)) != *(*byte)(c.Advance(y.data, i)) {
				return false
			}
		}
	}
	return true
}

func StringLess(x, y String) bool {
	n := x.len
	if n > y.len {
		n = y.len
	}
	for i := 0; i < n; i++ {
		ix := *(*byte)(c.Advance(x.data, i))
		iy := *(*byte)(c.Advance(y.data, i))
		if ix < iy {
			return true
		} else if ix > iy {
			return false
		}
	}
	return x.len < y.len
}

// -----------------------------------------------------------------------------
28
compiler/internal/runtime/z_thread.go
Normal file
@@ -0,0 +1,28 @@
/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	_ "unsafe"

	"github.com/goplus/llgo/c"
	"github.com/goplus/llgo/c/pthread"
)

func CreateThread(th *pthread.Thread, attr *pthread.Attr, routine pthread.RoutineFunc, arg c.Pointer) c.Int {
	return pthread.Create(th, attr, routine, arg)
}
591
compiler/internal/runtime/z_type.go
Normal file
@@ -0,0 +1,591 @@
/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/compiler/internal/abi"
)

type Kind = abi.Kind
type Type = abi.Type

// -----------------------------------------------------------------------------
var (
	tyBasic [abi.UnsafePointer + 1]*Type
)

func basicEqual(kind Kind, size uintptr) func(a, b unsafe.Pointer) bool {
	switch kind {
	case abi.Bool, abi.Int, abi.Int8, abi.Int16, abi.Int32, abi.Int64,
		abi.Uint, abi.Uint8, abi.Uint16, abi.Uint32, abi.Uint64, abi.Uintptr:
		switch size {
		case 1:
			return memequal8
		case 2:
			return memequal16
		case 4:
			return memequal32
		case 8:
			return memequal64
		}
	case abi.Float32:
		return f32equal
	case abi.Float64:
		return f64equal
	case abi.Complex64:
		return c64equal
	case abi.Complex128:
		return c128equal
	case abi.String:
		return strequal
	case abi.UnsafePointer:
		return memequalptr
	}
	panic("unreachable")
}

func basicFlags(kind Kind) abi.TFlag {
	switch kind {
	case abi.Float32, abi.Float64, abi.Complex64, abi.Complex128, abi.String:
		return 0
	}
	return abi.TFlagRegularMemory
}

func Basic(_kind Kind) *Type {
	kind := _kind & abi.KindMask
	if tyBasic[kind] == nil {
		name, size, align := basicTypeInfo(kind)
		var bytes uintptr
		if kind == abi.String {
			bytes = pointerSize
		}
		tyBasic[kind] = &Type{
			Size_:       size,
			PtrBytes:    bytes,
			Hash:        uint32(kind),
			Align_:      uint8(align),
			FieldAlign_: uint8(align),
			Kind_:       uint8(_kind),
			Equal:       basicEqual(kind, size),
			TFlag:       basicFlags(kind),
			Str_:        name,
		}
	}
	return tyBasic[kind]
}

func basicTypeInfo(kind abi.Kind) (string, uintptr, uintptr) {
	switch kind {
	case abi.Bool:
		return "bool", unsafe.Sizeof(false), unsafe.Alignof(false)
	case abi.Int:
		return "int", unsafe.Sizeof(0), unsafe.Alignof(0)
	case abi.Int8:
		return "int8", 1, 1
	case abi.Int16:
		return "int16", 2, 2
	case abi.Int32:
		return "int32", 4, 4
	case abi.Int64:
		return "int64", 8, 8
	case abi.Uint:
		return "uint", unsafe.Sizeof(uint(0)), unsafe.Alignof(uint(0))
	case abi.Uint8:
		return "uint8", 1, 1
	case abi.Uint16:
		return "uint16", 2, 2
	case abi.Uint32:
		return "uint32", 4, 4
	case abi.Uint64:
		return "uint64", 8, 8
	case abi.Uintptr:
		return "uintptr", unsafe.Sizeof(uintptr(0)), unsafe.Alignof(uintptr(0))
	case abi.Float32:
		return "float32", 4, 4
	case abi.Float64:
		return "float64", 8, 8
	case abi.Complex64:
		return "complex64", 8, 4
	case abi.Complex128:
		return "complex128", 16, 8
	case abi.String:
		return "string", unsafe.Sizeof(String{}), unsafe.Alignof("")
	case abi.UnsafePointer:
		return "unsafe.Pointer", unsafe.Sizeof(unsafe.Pointer(nil)), unsafe.Alignof(unsafe.Pointer(nil))
	}
	panic("unreachable")
}

// -----------------------------------------------------------------------------

// StructField returns a struct field.
func StructField(name string, typ *Type, off uintptr, tag string, embedded bool) abi.StructField {
	return abi.StructField{
		Name_:     name,
		Typ:       typ,
		Offset:    off,
		Tag_:      tag,
		Embedded_: embedded,
	}
}

// Struct returns a struct type.
func Struct(pkgPath string, size uintptr, fields ...abi.StructField) *Type {
	if t := rtypeList.findStruct(pkgPath, size, fields); t != nil {
		return t
	}
	ret := &abi.StructType{
		Type: Type{
			Size_: size,
			Kind_: uint8(abi.Struct),
			Str_:  structStr(fields),
		},
		PkgPath_: pkgPath,
		Fields:   fields,
	}
	var hash uint32 = 9059
	var comparable bool = true
	var typalign uint8
	for _, f := range fields {
		ft := f.Typ
		if ft.Align_ > typalign {
			typalign = ft.Align_
		}
		if f.Typ.PtrBytes != 0 {
			ret.PtrBytes = f.Offset + f.Typ.PtrBytes
		}
		comparable = comparable && (ft.Equal != nil)
		if f.Embedded_ {
			hash += 8861
		}
		hash += hashString(f.Tag_)
		hash += hashString(f.Name_)
		hash += f.Typ.Hash
	}
	ret.Hash = hash
	ret.Align_ = typalign
	ret.FieldAlign_ = typalign
	if comparable {
		if size == 0 {
			ret.Equal = memequal0
		} else {
			ret.Equal = func(p, q unsafe.Pointer) bool {
				for _, ft := range fields {
					pi := add(p, ft.Offset)
					qi := add(q, ft.Offset)
					if !ft.Typ.Equal(pi, qi) {
						return false
					}
				}
				return true
			}
		}
	}
	if isRegularMemory(&ret.Type) {
		ret.TFlag = abi.TFlagRegularMemory
	}
	if len(fields) == 1 && isDirectIface(fields[0].Typ) {
		ret.Kind_ |= abi.KindDirectIface
	}
	if len(fields) == 2 && fields[0].Name_ == "$f" && fields[0].Typ.Kind() == abi.Func &&
		fields[1].Name_ == "$data" && fields[1].Typ.Kind() == abi.UnsafePointer {
		ret.TFlag |= abi.TFlagClosure
	}
	rtypeList.addType(&ret.Type)
	return &ret.Type
}
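
// Illustrative sketch (not part of this commit): building the runtime type
// for `struct { X, Y int }` with the helpers above. The offsets and total
// size assume a 64-bit target; the pkg path is made up:
//
//	ti := Basic(abi.Int)
//	t := Struct("github.com/user/pkg", 16,
//		StructField("X", ti, 0, "", false),
//		StructField("Y", ti, 8, "", false),
//	)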
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// PointerTo returns the pointer type with element elem.
|
||||
func PointerTo(elem *Type) *Type {
|
||||
ret := elem.PtrToThis_
|
||||
if ret == nil {
|
||||
ret = newPointer(elem)
|
||||
elem.PtrToThis_ = ret
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
const pointerAlign = uint8(unsafe.Alignof(uintptr(0)))
|
||||
|
||||
func newPointer(elem *Type) *Type {
|
||||
ptr := &abi.PtrType{
|
||||
Type: Type{
|
||||
Size_: unsafe.Sizeof(uintptr(0)),
|
||||
PtrBytes: pointerSize,
|
||||
Hash: 9067 + 2*elem.Hash,
|
||||
Align_: pointerAlign,
|
||||
FieldAlign_: pointerAlign,
|
||||
Kind_: uint8(abi.Pointer),
|
||||
Equal: memequalptr,
|
||||
TFlag: abi.TFlagRegularMemory,
|
||||
},
|
||||
Elem: elem,
|
||||
}
|
||||
if (elem.TFlag & abi.TFlagExtraStar) != 0 {
|
||||
ptr.Str_ = "**" + elem.Str_
|
||||
} else {
|
||||
ptr.TFlag |= abi.TFlagExtraStar
|
||||
ptr.Str_ = elem.Str_
|
||||
}
|
||||
return &ptr.Type
|
||||
}
|
||||
|
||||
func setPointer(ptr *abi.PtrType, elem *Type) {
|
||||
ptr.PtrBytes = pointerSize
|
||||
ptr.Hash = uint32(abi.Pointer) // TODO(xsw): hash
|
||||
ptr.Align_ = pointerAlign
|
||||
ptr.FieldAlign_ = pointerAlign
|
||||
ptr.Kind_ = uint8(abi.Pointer)
|
||||
ptr.Equal = memequalptr
|
||||
ptr.Elem = elem
|
||||
ptr.Str_ = elem.Str_
|
||||
ptr.TFlag |= abi.TFlagRegularMemory | abi.TFlagExtraStar
|
||||
}
|
||||
|
||||
// SliceOf returns the slice type with element elem.
|
||||
func SliceOf(elem *Type) *Type {
|
||||
if t := rtypeList.findElem(abi.Slice, elem, 0); t != nil {
|
||||
return t
|
||||
}
|
||||
ret := &abi.SliceType{
|
||||
Type: Type{
|
||||
Size_: unsafe.Sizeof([]int{}),
|
||||
PtrBytes: pointerSize,
|
||||
Hash: 9049 + 2*elem.Hash,
|
||||
Align_: pointerAlign,
|
||||
FieldAlign_: pointerAlign,
|
||||
Kind_: uint8(abi.Slice),
|
||||
Str_: "[]" + elem.String(),
|
||||
},
|
||||
Elem: elem,
|
||||
}
|
||||
rtypeList.addType(&ret.Type)
|
||||
return &ret.Type
|
||||
}

// ArrayOf returns the array type with element elem and length.
func ArrayOf(length uintptr, elem *Type) *Type {
	if t := rtypeList.findElem(abi.Array, elem, length); t != nil {
		return t
	}
	ret := &abi.ArrayType{
		Type: Type{
			Size_:       length * elem.Size_,
			Hash:        9043 + 2*uint32(length) + 3*elem.Hash,
			Align_:      elem.Align_,
			FieldAlign_: elem.FieldAlign_,
			Kind_:       uint8(abi.Array),
			Str_:        "[" + string(itoa(make([]byte, 20), uint64(length))) + "]" + elem.String(),
		},
		Elem:  elem,
		Slice: SliceOf(elem),
		Len:   length,
	}
	if length != 0 && elem.PtrBytes != 0 {
		// Pointers can occur no later than elem.PtrBytes into the final element.
		ret.PtrBytes = ret.Size_ - elem.Size_ + elem.PtrBytes
	}
	if eequal := elem.Equal; eequal != nil {
		if elem.Size_ == 0 {
			ret.Equal = memequal0
		} else {
			ret.Equal = func(p, q unsafe.Pointer) bool {
				for i := uintptr(0); i < length; i++ {
					pi := add(p, i*elem.Size_)
					qi := add(q, i*elem.Size_)
					if !eequal(pi, qi) {
						return false
					}
				}
				return true
			}
		}
	}
	if ret.Len == 0 || ret.Elem.TFlag&abi.TFlagRegularMemory != 0 {
		ret.TFlag = abi.TFlagRegularMemory
	}
	if ret.Len == 1 && isDirectIface(ret.Elem) {
		ret.Kind_ |= abi.KindDirectIface
	}
	rtypeList.addType(&ret.Type)
	return &ret.Type
}
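
// Worked example for the PtrBytes computation above (illustrative numbers):
// for [3]T with elem.Size_ == 24 and elem.PtrBytes == 8, the array's
// pointer-containing prefix is Size_ - elem.Size_ + elem.PtrBytes =
// 72 - 24 + 8 = 56 bytes; the garbage collector need not scan past that.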

func ChanOf(dir int, strChan string, elem *Type) *Type {
	if t := rtypeList.findElem(abi.Chan, elem, uintptr(dir)); t != nil {
		return t
	}
	ret := &abi.ChanType{
		Type: Type{
			Size_:       pointerSize,
			PtrBytes:    pointerSize,
			Hash:        9127 + 2*uint32(dir) + 3*elem.Hash,
			Align_:      pointerAlign,
			TFlag:       abi.TFlagRegularMemory,
			FieldAlign_: pointerAlign,
			Kind_:       uint8(abi.Chan),
			Equal:       memequalptr,
			Str_:        strChan + " " + elem.String(),
		},
		Elem: elem,
		Dir:  abi.ChanDir(dir),
	}
	rtypeList.addType(&ret.Type)
	return &ret.Type
}
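
// Hedged usage sketch (the exact strChan spellings are an assumption inferred
// from the `strChan + " " + elem` formatting above):
//
//	ChanOf(int(abi.BothDir), "chan", t)   // Str_ "chan T"
//	ChanOf(int(abi.RecvDir), "<-chan", t) // Str_ "<-chan T"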

func MapOf(key, elem *Type, bucket *Type, flags int) *Type {
	if t := rtypeList.findMap(key, elem); t != nil {
		return t
	}
	ret := &abi.MapType{
		Type: Type{
			Size_:       unsafe.Sizeof(uintptr(0)),
			PtrBytes:    pointerSize,
			Hash:        9109 + 2*key.Hash + 3*elem.Hash,
			Align_:      pointerAlign,
			FieldAlign_: pointerAlign,
			Kind_:       uint8(abi.Map),
			Str_:        "map[" + key.String() + "]" + elem.String(),
		},
		Key:        key,
		Elem:       elem,
		Bucket:     bucket,
		KeySize:    uint8(key.Size_),
		ValueSize:  uint8(elem.Size_),
		BucketSize: uint16(bucket.Size_),
		Flags:      uint32(flags),
	}
	ret.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
		return typehash(key, p, seed)
	}
	rtypeList.addType(&ret.Type)
	return &ret.Type
}
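
// The Hasher closure captures only the key type; the map implementation
// supplies a pointer to a key value plus a per-map seed, and typehash
// dispatches on the key's kind. A sketch of the call shape (hypothetical
// caller):
//
//	h := mt.Hasher(unsafe.Pointer(&k), seed)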

func isRegularMemory(t *_type) bool {
	switch t.Kind() {
	case abi.Func, abi.Map, abi.Slice, abi.String, abi.Interface:
		return false
	case abi.Float32, abi.Float64, abi.Complex64, abi.Complex128:
		return false
	case abi.Array:
		at := t.ArrayType()
		b := isRegularMemory(at.Elem)
		if b {
			return true
		}
		if at.Len == 0 {
			return true
		}
		return b
	case abi.Struct:
		st := t.StructType()
		n := len(st.Fields)
		switch n {
		case 0:
			return true
		case 1:
			f := st.Fields[0]
			if f.Name_ == "_" {
				return false
			}
			return isRegularMemory(f.Typ)
		default:
			for i := 0; i < n; i++ {
				f := st.Fields[i]
				if f.Name_ == "_" || !isRegularMemory(f.Typ) || ispaddedfield(st, i) {
					return false
				}
			}
		}
	}
	return true
}
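
// Intuition, with examples: "regular memory" means equality is a plain
// byte-wise memequal over the full size. Floats and complex numbers fail
// (±0 and NaN break byte equality), padded or blank ("_") fields fail
// (padding bytes are unspecified), while e.g. struct{ a, b int32 } or
// [4]byte qualify.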

// ispaddedfield reports whether the i'th field of struct type st is followed
// by padding.
func ispaddedfield(st *structtype, i int) bool {
	end := st.Size()
	if i+1 < len(st.Fields) {
		end = st.Fields[i+1].Offset
	}
	fd := st.Fields[i]
	return fd.Offset+fd.Typ.Size_ != end
}
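
// Worked example: in struct{ a int8; b int64 } on a 64-bit target, field a
// sits at offset 0 with size 1 while b starts at offset 8, so a is followed
// by 7 bytes of padding and ispaddedfield(st, 0) reports true.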

func structStr(fields []abi.StructField) string {
	repr := make([]byte, 0, 64)
	repr = append(repr, "struct {"...)
	for i, st := range fields {
		if i > 0 {
			repr = append(repr, ';')
		}
		repr = append(repr, ' ')
		if !st.Embedded_ {
			repr = append(repr, st.Name_...)
			repr = append(repr, ' ')
		}
		repr = append(repr, st.Typ.String()...)
	}
	if len(fields) > 0 {
		repr = append(repr, ' ')
	}
	repr = append(repr, '}')
	return string(repr)
}
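
// Example outputs (matching reflect's spelling of struct types):
//
//	structStr(nil) == "struct {}"
//	structStr(/* fields of {A int; B bool} */) == "struct { A int; B bool }"
//
// Embedded fields print only their type's string.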

type rtypes struct {
	types []*abi.Type
}

func (r *rtypes) findNamed(pkgPath string, name string) *Type {
	for _, typ := range r.types {
		if typ.TFlag&(abi.TFlagNamed|abi.TFlagUncommon) != 0 &&
			typ.Str_ == name && typ.Uncommon().PkgPath_ == pkgPath {
			return typ
		}
	}
	return nil
}

func (r *rtypes) findElem(kind abi.Kind, elem *Type, extra uintptr) *Type {
	for _, typ := range r.types {
		if typ.Kind() == kind && typ.Elem() == elem {
			switch kind {
			case abi.Chan:
				if uintptr(typ.ChanDir()) == extra {
					return typ
				}
			case abi.Array:
				if uintptr(typ.Len()) == extra {
					return typ
				}
			default:
				return typ
			}
		}
	}
	return nil
}
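
// Design note: rtypeList is a flat slice scanned linearly, so every find*
// helper is O(n) in the number of registered types; that is tolerable
// because each derived type is built once and then memoized. The extra
// parameter disambiguates types whose kind and element alone are not unique:
//
//	rtypeList.findElem(abi.Array, t, 4) // matches only [4]t, not [5]t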

func (r *rtypes) findMap(key, elem *Type) *Type {
	for _, typ := range r.types {
		if typ.Kind() == abi.Map {
			if mt := typ.MapType(); mt.Key == key && mt.Elem == elem {
				return typ
			}
		}
	}
	return nil
}

func eqFields(s1, s2 []abi.StructField) bool {
	n := len(s1)
	if n != len(s2) {
		return false
	}
	for i := 0; i < n; i++ {
		f1, f2 := s1[i], s2[i]
		if f1.Name_ != f2.Name_ || f1.Embedded_ != f2.Embedded_ || f1.Typ != f2.Typ {
			return false
		}
	}
	return true
}

func (r *rtypes) findStruct(pkgPath string, size uintptr, fields []abi.StructField) *Type {
	for _, typ := range r.types {
		if typ.Kind() == abi.Struct && typ.Size() == size {
			if st := typ.StructType(); (st.IsClosure() || st.PkgPath_ == pkgPath) && eqFields(st.Fields, fields) {
				return typ
			}
		}
	}
	return nil
}

func eqImethods(s1, s2 []Imethod) bool {
	n := len(s1)
	if n != len(s2) {
		return false
	}
	for i := 0; i < n; i++ {
		f1, f2 := s1[i], s2[i]
		if f1.Name_ != f2.Name_ || f1.Typ_ != f2.Typ_ {
			return false
		}
	}
	return true
}

func (r *rtypes) findInterface(pkgPath string, methods []Imethod) *abi.InterfaceType {
	for _, typ := range r.types {
		if typ.Kind() == abi.Interface {
			if it := typ.InterfaceType(); it.PkgPath_ == pkgPath && eqImethods(it.Methods, methods) {
				return it
			}
		}
	}
	return nil
}

func eqTypes(s1, s2 []*Type) bool {
	n := len(s1)
	if n != len(s2) {
		return false
	}
	for i := 0; i < n; i++ {
		if s1[i] != s2[i] {
			return false
		}
	}
	return true
}

func (r *rtypes) findFunc(in, out []*Type, variadic bool) *abi.FuncType {
	for _, typ := range r.types {
		if typ.Kind() == abi.Func {
			if ft := typ.FuncType(); ft.Variadic() == variadic && eqTypes(ft.In, in) && eqTypes(ft.Out, out) {
				return ft
			}
		}
	}
	return nil
}

func (r *rtypes) addType(typ *Type) {
	r.types = append(r.types, typ)
}

var rtypeList rtypes

// hashString computes a Fowler–Noll–Vo (FNV-1a) style hash of s; note the
// offset basis here is zero rather than the standard 2166136261.
func hashString(s string) uint32 {
	var h uint32
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i])
		h *= 16777619
	}
	return h
}
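
// Sanity check by hand: hashString("a") is one xor/multiply round,
// (0 ^ 'a') * 16777619 = 97 * 16777619 = 1627429043 (mod 2^32).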

// -----------------------------------------------------------------------------