Revert "runtime: map; llgo/ssa: MapUpdate"

xushiwei
2024-06-14 22:50:23 +08:00
committed by GitHub
parent 78b8455bba
commit 5e45e38481
26 changed files with 149 additions and 3623 deletions


@@ -63,8 +63,7 @@ jobs:
       run: go install ./...
     - name: LLGO tests
-      if: false
-      #if matrix.os != 'ubuntu-latest'
+      if: matrix.os != 'ubuntu-latest'
       run: |
         echo "Test result on ${{ matrix.os }} with LLVM ${{ matrix.llvm }}" > result.md
         LLGOROOT=$PWD bash .github/workflows/test_llgo.sh

c/c.go

@@ -46,7 +46,7 @@ type integer interface {
 func Str(string) *Char

 // llgo:link Advance llgo.advance
-func Advance[PtrT any, I integer](ptr PtrT, offset I) PtrT { return ptr }
+func Advance[PtrT any](ptr PtrT, offset int) PtrT { return ptr }

 // llgo:link Index llgo.index
 func Index[T any, I integer](ptr *T, offset I) T { return *ptr }

@@ -66,9 +66,6 @@ func Free(ptr Pointer)
 //go:linkname Memcpy C.memcpy
 func Memcpy(dst, src Pointer, n uintptr) Pointer

-//go:linkname Memmove C.memmove
-func Memmove(dst, src Pointer, n uintptr) Pointer
-
 //go:linkname Memset C.memset
 func Memset(s Pointer, c Int, n uintptr) Pointer
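A note on the Advance change above: the removed overload took any integer offset via the integer constraint, while the restored one takes a plain int. Outside llgo, the closest standard-Go analogue to this intrinsic is unsafe.Add; a small sketch using only the standard library, not llgo's API:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	xs := [4]int32{10, 20, 30, 40}
	p := unsafe.Pointer(&xs[0])
	// Advance the pointer by two elements, like Advance(p, 2) would.
	q := (*int32)(unsafe.Add(p, 2*unsafe.Sizeof(xs[0])))
	fmt.Println(*q) // 30
}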


@@ -4,8 +4,6 @@ source_filename = "main"
@"main.init$guard" = global ptr null @"main.init$guard" = global ptr null
@__llgo_argc = global ptr null @__llgo_argc = global ptr null
@__llgo_argv = global ptr null @__llgo_argv = global ptr null
@"map[_llgo_int]_llgo_int" = linkonce global ptr null
@_llgo_int = linkonce global ptr null
@0 = private unnamed_addr constant [10 x i8] c"Hello %d\0A\00", align 1 @0 = private unnamed_addr constant [10 x i8] c"Hello %d\0A\00", align 1
define void @main.init() { define void @main.init() {
@@ -15,7 +13,6 @@ _llgo_0:
_llgo_1: ; preds = %_llgo_0 _llgo_1: ; preds = %_llgo_0
store i1 true, ptr @"main.init$guard", align 1 store i1 true, ptr @"main.init$guard", align 1
call void @"main.init$after"()
br label %_llgo_2 br label %_llgo_2
_llgo_2: ; preds = %_llgo_1, %_llgo_0 _llgo_2: ; preds = %_llgo_1, %_llgo_0
@@ -29,17 +26,7 @@ _llgo_0:
call void @"github.com/goplus/llgo/internal/runtime.init"() call void @"github.com/goplus/llgo/internal/runtime.init"()
call void @main.init() call void @main.init()
%2 = call ptr @"github.com/goplus/llgo/internal/runtime.MakeSmallMap"() %2 = call ptr @"github.com/goplus/llgo/internal/runtime.MakeSmallMap"()
%3 = load ptr, ptr @"map[_llgo_int]_llgo_int", align 8 %3 = call i32 (ptr, ...) @printf(ptr @0, <null operand!>)
%4 = alloca i8, i64 48, align 1
store ptr %2, ptr %4, align 8
%5 = call ptr @"github.com/goplus/llgo/internal/runtime.MapAssign"(ptr %3, ptr %4, i64 23)
store i64 100, ptr %5, align 4
%6 = load ptr, ptr @"map[_llgo_int]_llgo_int", align 8
%7 = alloca i8, i64 48, align 1
store ptr %2, ptr %7, align 8
%8 = call ptr @"github.com/goplus/llgo/internal/runtime.MapAssign"(ptr %6, ptr %7, i64 7)
store i64 29, ptr %8, align 4
%9 = call i32 (ptr, ...) @printf(ptr @0, <null operand!>)
ret i32 0 ret i32 0
} }
@@ -47,37 +34,4 @@ declare void @"github.com/goplus/llgo/internal/runtime.init"()
declare ptr @"github.com/goplus/llgo/internal/runtime.MakeSmallMap"() declare ptr @"github.com/goplus/llgo/internal/runtime.MakeSmallMap"()
define void @"main.init$after"() {
_llgo_0:
%0 = load ptr, ptr @_llgo_int, align 8
%1 = icmp eq ptr %0, null
br i1 %1, label %_llgo_1, label %_llgo_2
_llgo_1: ; preds = %_llgo_0
%2 = call ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64 2)
store ptr %2, ptr @_llgo_int, align 8
br label %_llgo_2
_llgo_2: ; preds = %_llgo_1, %_llgo_0
%3 = load ptr, ptr @_llgo_int, align 8
%4 = load ptr, ptr @_llgo_int, align 8
%5 = load ptr, ptr @"map[_llgo_int]_llgo_int", align 8
%6 = icmp eq ptr %5, null
br i1 %6, label %_llgo_3, label %_llgo_4
_llgo_3: ; preds = %_llgo_2
%7 = call ptr @"github.com/goplus/llgo/internal/runtime.MapOf"(ptr %3, ptr %4)
store ptr %7, ptr @"map[_llgo_int]_llgo_int", align 8
br label %_llgo_4
_llgo_4: ; preds = %_llgo_3, %_llgo_2
ret void
}
declare ptr @"github.com/goplus/llgo/internal/runtime.Basic"(i64)
declare ptr @"github.com/goplus/llgo/internal/runtime.MapOf"(ptr, ptr)
declare ptr @"github.com/goplus/llgo/internal/runtime.MapAssign"(ptr, ptr, ptr)
declare i32 @printf(ptr, ...) declare i32 @printf(ptr, ...)
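The deleted main.init$after above is a nil-checked lazy initialization: build the _llgo_int descriptor on first use, then derive the map[_llgo_int]_llgo_int descriptor from it via MapOf. A minimal Go sketch of that same check-then-store shape, with a string standing in for the descriptor (names here are illustrative only, not llgo's API):

package main

import "fmt"

var intDesc *string // stand-in for the @_llgo_int global

// getIntDesc mirrors the icmp-eq-null / branch / store shape of the IR:
// construct the descriptor only on first use, then reuse the cached pointer.
func getIntDesc() *string {
	if intDesc == nil {
		d := "descriptor for int"
		intDesc = &d
	}
	return intDesc
}

func main() {
	fmt.Println(*getIntDesc())
	fmt.Println(getIntDesc() == getIntDesc()) // true: cached after the first call
}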


@@ -170,24 +170,6 @@ type MapType struct {
 	Flags uint32
 }

-// Note: flag values must match those used in the TMAP case
-// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
-func (mt *MapType) IndirectKey() bool { // store ptr to key instead of key itself
-	return mt.Flags&1 != 0
-}
-func (mt *MapType) IndirectElem() bool { // store ptr to elem instead of elem itself
-	return mt.Flags&2 != 0
-}
-func (mt *MapType) ReflexiveKey() bool { // true if k==k for all keys
-	return mt.Flags&4 != 0
-}
-func (mt *MapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
-	return mt.Flags&8 != 0
-}
-func (mt *MapType) HashMightPanic() bool { // true if hash function might panic
-	return mt.Flags&16 != 0
-}
-
 type PtrType struct {
 	Type
 	Elem *Type // pointer element (pointed at) type
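The deleted accessors all follow one pattern: each map property is a single bit in Flags, tested by masking. A standalone sketch of that idiom, using the same bit assignments shown above:

package main

import "fmt"

type MapType struct{ Flags uint32 }

// Each property occupies one bit of Flags; a mask-and-compare reads it.
func (mt *MapType) IndirectKey() bool  { return mt.Flags&1 != 0 }
func (mt *MapType) IndirectElem() bool { return mt.Flags&2 != 0 }
func (mt *MapType) ReflexiveKey() bool { return mt.Flags&4 != 0 }

func main() {
	mt := &MapType{Flags: 1 | 4} // indirect key, reflexive key
	fmt.Println(mt.IndirectKey(), mt.IndirectElem(), mt.ReflexiveKey()) // true false true
}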


@@ -1,368 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"unsafe"
"github.com/goplus/llgo/internal/abi"
"github.com/goplus/llgo/internal/runtime/c"
)
const (
c0 = uintptr((8-goarchPtrSize)/4*2860486313 + (goarchPtrSize-4)/4*33054211828000289)
c1 = uintptr((8-goarchPtrSize)/4*3267000013 + (goarchPtrSize-4)/4*23344194077549503)
)
/*
func memhash0(p unsafe.Pointer, h uintptr) uintptr {
return h
}
func memhash8(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, 1)
}
func memhash16(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, 2)
}
func memhash128(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, 16)
}
*/

/*
//go:nosplit
func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr {
ptr := getclosureptr()
size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h)))
return memhash(p, h, size)
}
*/
func memhash(p unsafe.Pointer, h, s uintptr) uintptr {
h ^= c0
for s > 0 {
s--
h = h*c1 + uintptr(*(*uint8)(c.Advance(p, s)))
}
return h
}
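memhash folds the buffer into the seed one byte at a time with a multiply-accumulate, walking from the end. A self-contained sketch over a byte slice, using the 32-bit variants of the constants for brevity (the real c0/c1 are selected per word size by the const block above):

package main

import "fmt"

const (
	k0 = uintptr(2860486313) // 32-bit variant of c0, for illustration
	k1 = uintptr(3267000013) // 32-bit variant of c1
)

// foldHash mirrors memhash: xor in the seed, then walk the bytes from
// the end, multiplying and accumulating.
func foldHash(b []byte, h uintptr) uintptr {
	h ^= k0
	for s := len(b); s > 0; {
		s--
		h = h*k1 + uintptr(b[s])
	}
	return h
}

func main() {
	fmt.Println(foldHash([]byte("hello"), 0) != foldHash([]byte("hellp"), 0)) // true
}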
func memhash32(p unsafe.Pointer, h uintptr) uintptr {
return (h^c0)*c1 + uintptr(*(*uint32)(p))
}
func memhash64(p unsafe.Pointer, h uintptr) uintptr {
return (h^c0)*c1 + uintptr(*(*uint64)(p))
}
func strhash(p unsafe.Pointer, h uintptr) uintptr {
x := (*String)(p)
return memhash(x.data, h, uintptr(x.len))
}
// NOTE: Because NaN != NaN, a map can contain any
// number of (mostly useless) entries keyed with NaNs.
// To avoid long hash chains, we assign a random number
// as the hash value for a NaN.
func f32hash(p unsafe.Pointer, h uintptr) uintptr {
f := *(*float32)(p)
switch {
case f == 0:
return c1 * (c0 ^ h) // +0, -0
case f != f:
return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
default:
return memhash(p, h, 4)
}
}
func f64hash(p unsafe.Pointer, h uintptr) uintptr {
f := *(*float64)(p)
switch {
case f == 0:
return c1 * (c0 ^ h) // +0, -0
case f != f:
return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN
default:
return memhash(p, h, 8)
}
}
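The NaN comment above is observable from ordinary Go: because NaN != NaN, every insert under a NaN key creates a fresh entry that no lookup can ever match, so a random hash is as good as any:

package main

import (
	"fmt"
	"math"
)

func main() {
	m := map[float64]int{}
	for i := 0; i < 3; i++ {
		m[math.NaN()] = i // each NaN key hashes independently
	}
	fmt.Println(len(m)) // 3: three distinct, unreachable entries
	_, ok := m[math.NaN()]
	fmt.Println(ok) // false: NaN != NaN, so lookup never matches
}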
func c64hash(p unsafe.Pointer, h uintptr) uintptr {
x := (*[2]float32)(p)
return f32hash(unsafe.Pointer(&x[1]), f32hash(unsafe.Pointer(&x[0]), h))
}
func c128hash(p unsafe.Pointer, h uintptr) uintptr {
x := (*[2]float64)(p)
return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h))
}
func interhash(p unsafe.Pointer, h uintptr) uintptr {
a := (*iface)(p)
tab := a.tab
if tab == nil {
return h
}
t := tab._type
if t.Equal == nil {
// Check hashability here. We could do this check inside
// typehash, but we want to report the topmost type in
// the error text (e.g. in a struct with a field of slice type
// we want to report the struct, not the slice).
panic(errorString("hash of unhashable type " + t.Name()))
}
if isDirectIface(t) {
return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
return c1 * typehash(t, a.data, h^c0)
}
}
func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
a := (*eface)(p)
t := a._type
if t == nil {
return h
}
if t.Equal == nil {
// See comment in interhash above.
panic(errorString("hash of unhashable type " + t.Name()))
}
if isDirectIface(t) {
return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
return c1 * typehash(t, a.data, h^c0)
}
}
// typehash computes the hash of the object of type t at address p.
// h is the seed.
// This function is seldom used. Most maps use for hashing either
// fixed functions (e.g. f32hash) or compiler-generated functions
// (e.g. for a type like struct { x, y string }). This implementation
// is slower but more general and is used for hashing interface types
// (called from interhash or nilinterhash, above) or for hashing in
// maps generated by reflect.MapOf (reflect_typehash, below).
// Note: this function must match the compiler generated
// functions exactly. See issue 37716.
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
if t.TFlag&abi.TFlagRegularMemory != 0 {
// Handle ptr sizes specially, see issue 37086.
switch t.Size_ {
case 4:
return memhash32(p, h)
case 8:
return memhash64(p, h)
default:
return memhash(p, h, t.Size_)
}
}
switch t.Kind() {
case abi.Float32:
return f32hash(p, h)
case abi.Float64:
return f64hash(p, h)
case abi.Complex64:
return c64hash(p, h)
case abi.Complex128:
return c128hash(p, h)
case abi.String:
return strhash(p, h)
case abi.Interface:
i := (*interfacetype)(unsafe.Pointer(t))
if len(i.Methods) == 0 {
return nilinterhash(p, h)
}
return interhash(p, h)
case abi.Array:
a := (*abi.ArrayType)(unsafe.Pointer(t))
for i := uintptr(0); i < a.Len; i++ {
h = typehash(a.Elem, add(p, i*a.Elem.Size_), h)
}
return h
case abi.Struct:
s := (*abi.StructType)(unsafe.Pointer(t))
for _, f := range s.Fields {
/* TODO(xsw): skip blank field
if f.Name.IsBlank() {
continue
}
*/
h = typehash(f.Typ, add(p, f.Offset), h)
}
return h
default:
// Should never happen, as typehash should only be called
// with comparable types.
panic(errorString("hash of unhashable type " + t.Name()))
}
}
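typehash recurses structurally: arrays fold over elements, structs fold over fields. A reflection-based sketch of the same recursion shape, with a toy combiner rather than the runtime's hash functions:

package main

import (
	"fmt"
	"reflect"
)

// structuralHash mirrors typehash's shape: leaves hash their contents,
// arrays fold over elements, structs fold over fields.
func structuralHash(v reflect.Value, h uintptr) uintptr {
	switch v.Kind() {
	case reflect.Int, reflect.Int64:
		return h*31 + uintptr(v.Int())
	case reflect.String:
		s := v.String()
		for i := 0; i < len(s); i++ {
			h = h*31 + uintptr(s[i])
		}
		return h
	case reflect.Array:
		for i := 0; i < v.Len(); i++ {
			h = structuralHash(v.Index(i), h)
		}
		return h
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			h = structuralHash(v.Field(i), h)
		}
		return h
	default:
		panic("hash of unhashable kind " + v.Kind().String())
	}
}

func main() {
	type pair struct {
		X int
		Y string
	}
	fmt.Println(structuralHash(reflect.ValueOf(pair{1, "a"}), 0))
}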
/*
//go:linkname reflect_typehash reflect.typehash
func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
return typehash(t, p, h)
}
*/
func memequal0(p, q unsafe.Pointer) bool {
return true
}
func memequal8(p, q unsafe.Pointer) bool {
return *(*int8)(p) == *(*int8)(q)
}
func memequal16(p, q unsafe.Pointer) bool {
return *(*int16)(p) == *(*int16)(q)
}
func memequal32(p, q unsafe.Pointer) bool {
return *(*int32)(p) == *(*int32)(q)
}
func memequal64(p, q unsafe.Pointer) bool {
return *(*int64)(p) == *(*int64)(q)
}
func memequal128(p, q unsafe.Pointer) bool {
return *(*[2]int64)(p) == *(*[2]int64)(q)
}
func f32equal(p, q unsafe.Pointer) bool {
return *(*float32)(p) == *(*float32)(q)
}
func f64equal(p, q unsafe.Pointer) bool {
return *(*float64)(p) == *(*float64)(q)
}
func c64equal(p, q unsafe.Pointer) bool {
return *(*complex64)(p) == *(*complex64)(q)
}
func c128equal(p, q unsafe.Pointer) bool {
return *(*complex128)(p) == *(*complex128)(q)
}
func strequal(p, q unsafe.Pointer) bool {
return *(*string)(p) == *(*string)(q)
}
func interequal(p, q unsafe.Pointer) bool {
x := *(*iface)(p)
y := *(*iface)(q)
return x.tab == y.tab && ifaceeq(x.tab, x.data, y.data)
}
func nilinterequal(p, q unsafe.Pointer) bool {
x := *(*eface)(p)
y := *(*eface)(q)
return x._type == y._type && efaceeq(x._type, x.data, y.data)
}
func efaceeq(t *_type, x, y unsafe.Pointer) bool {
if t == nil {
return true
}
eq := t.Equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.Name()))
}
if isDirectIface(t) {
// Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof.
// Maps and funcs are not comparable, so they can't reach here.
// Ptrs, chans, and single-element items can be compared directly using ==.
return x == y
}
return eq(x, y)
}
func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
if tab == nil {
return true
}
t := tab._type
eq := t.Equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.Name()))
}
if isDirectIface(t) {
// See comment in efaceeq.
return x == y
}
return eq(x, y)
}
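The eq == nil panics above are what surface in user code as "comparing uncomparable type" when interfaces holding slices, maps, or funcs are compared:

package main

import "fmt"

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	var a, b any = []int{1}, []int{1}
	fmt.Println(a == b) // panics: comparing uncomparable type []int
}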
// Testing adapters for hash quality tests (see hash_test.go)
func stringHash(s string, seed uintptr) uintptr {
return strhash(unsafe.Pointer(&s), seed)
}
func bytesHash(b []byte, seed uintptr) uintptr {
s := (*slice)(unsafe.Pointer(&b))
return memhash(s.data, seed, uintptr(s.len))
}
func int32Hash(i uint32, seed uintptr) uintptr {
return memhash32(unsafe.Pointer(&i), seed)
}
func int64Hash(i uint64, seed uintptr) uintptr {
return memhash64(unsafe.Pointer(&i), seed)
}
func efaceHash(i any, seed uintptr) uintptr {
return nilinterhash(unsafe.Pointer(&i), seed)
}
func ifaceHash(i interface {
F()
}, seed uintptr) uintptr {
return interhash(unsafe.Pointer(&i), seed)
}
/*
const hashRandomBytes = goarch.PtrSize / 4 * 64
// used in asm_{386,amd64,arm64}.s to seed the hash function
var aeskeysched [hashRandomBytes]byte
// used in hash{32,64}.go to seed the hash function
var hashkey [4]uintptr
func alginit() {
// Install AES hash algorithms if the instructions needed are present.
if (GOARCH == "386" || GOARCH == "amd64") &&
cpu.X86.HasAES && // AESENC
cpu.X86.HasSSSE3 && // PSHUFB
cpu.X86.HasSSE41 { // PINSR{D,Q}
initAlgAES()
return
}
if GOARCH == "arm64" && cpu.ARM64.HasAES {
initAlgAES()
return
}
getRandomData((*[len(hashkey) * goarch.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
hashkey[0] |= 1 // make sure these numbers are odd
hashkey[1] |= 1
hashkey[2] |= 1
hashkey[3] |= 1
}
func initAlgAES() {
useAeshash = true
// Initialize with random data so hash collisions will be hard to engineer.
getRandomData(aeskeysched[:])
}
// Note: These routines perform the read with a native endianness.
func readUnaligned32(p unsafe.Pointer) uint32 {
q := (*[4]byte)(p)
if goarch.BigEndian {
return uint32(q[3]) | uint32(q[2])<<8 | uint32(q[1])<<16 | uint32(q[0])<<24
}
return uint32(q[0]) | uint32(q[1])<<8 | uint32(q[2])<<16 | uint32(q[3])<<24
}
func readUnaligned64(p unsafe.Pointer) uint64 {
q := (*[8]byte)(p)
if goarch.BigEndian {
return uint64(q[7]) | uint64(q[6])<<8 | uint64(q[5])<<16 | uint64(q[4])<<24 |
uint64(q[3])<<32 | uint64(q[2])<<40 | uint64(q[1])<<48 | uint64(q[0])<<56
}
return uint64(q[0]) | uint64(q[1])<<8 | uint64(q[2])<<16 | uint64(q[3])<<24 | uint64(q[4])<<32 | uint64(q[5])<<40 | uint64(q[6])<<48 | uint64(q[7])<<56
}
*/
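Outside the runtime, the endianness-aware unaligned reads sketched at the end of that block are usually written with encoding/binary, which names the byte order instead of branching on goarch.BigEndian:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
	fmt.Printf("%#x\n", binary.LittleEndian.Uint32(buf)) // 0x4030201
	fmt.Printf("%#x\n", binary.BigEndian.Uint64(buf))    // 0x102030405060708
}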


@@ -30,15 +30,11 @@ type (
 	FilePtr = unsafe.Pointer
 )

-type integer interface {
-	~int | ~uint | ~uintptr | ~int32 | ~uint32 | ~int64 | ~uint64
-}
-
 //go:linkname Str llgo.cstr
 func Str(string) *Char

 // llgo:link Advance llgo.advance
-func Advance[PtrT any, I integer](ptr PtrT, offset I) PtrT { return ptr }
+func Advance[PtrT any](ptr PtrT, offset int) PtrT { return ptr }

 //go:linkname Alloca llgo.alloca
 func Alloca(size uintptr) Pointer


@@ -1,334 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
/*
import "internal/bytealg"
// The Error interface identifies a run time error.
type Error interface {
error
// RuntimeError is a no-op function but
// serves to distinguish types that are run time
// errors from ordinary errors: a type is a
// run time error if it has a RuntimeError method.
RuntimeError()
}
// A TypeAssertionError explains a failed type assertion.
type TypeAssertionError struct {
_interface *_type
concrete *_type
asserted *_type
missingMethod string // one method needed by Interface, missing from Concrete
}
func (*TypeAssertionError) RuntimeError() {}
func (e *TypeAssertionError) Error() string {
inter := "interface"
if e._interface != nil {
inter = toRType(e._interface).string()
}
as := toRType(e.asserted).string()
if e.concrete == nil {
return "interface conversion: " + inter + " is nil, not " + as
}
cs := toRType(e.concrete).string()
if e.missingMethod == "" {
msg := "interface conversion: " + inter + " is " + cs + ", not " + as
if cs == as {
// provide slightly clearer error message
if toRType(e.concrete).pkgpath() != toRType(e.asserted).pkgpath() {
msg += " (types from different packages)"
} else {
msg += " (types from different scopes)"
}
}
return msg
}
return "interface conversion: " + cs + " is not " + as +
": missing method " + e.missingMethod
}
// itoa converts val to a decimal representation. The result is
// written somewhere within buf and the location of the result is returned.
// buf must be at least 20 bytes.
//
//go:nosplit
func itoa(buf []byte, val uint64) []byte {
i := len(buf) - 1
for val >= 10 {
buf[i] = byte(val%10 + '0')
i--
val /= 10
}
buf[i] = byte(val + '0')
return buf[i:]
}
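A usage sketch for the backwards-filling itoa above: digits are written from the end of buf, and the returned slice aliases that tail (the helper is duplicated here so the example stands alone):

package main

import "fmt"

func itoa(buf []byte, val uint64) []byte { // copy of the helper above
	i := len(buf) - 1
	for val >= 10 {
		buf[i] = byte(val%10 + '0')
		i--
		val /= 10
	}
	buf[i] = byte(val + '0')
	return buf[i:]
}

func main() {
	var buf [20]byte
	s := itoa(buf[:], 12345)
	fmt.Println(string(s), len(s)) // "12345" 5: the result sits at the tail of buf
}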
// An errorString represents a runtime error described by a single string.
type errorString string
func (e errorString) RuntimeError() {}
func (e errorString) Error() string {
return "runtime error: " + string(e)
}
type errorAddressString struct {
msg string // error message
addr uintptr // memory address where the error occurred
}
func (e errorAddressString) RuntimeError() {}
func (e errorAddressString) Error() string {
return "runtime error: " + e.msg
}
// Addr returns the memory address where a fault occurred.
// The address provided is best-effort.
// The veracity of the result may depend on the platform.
// Errors providing this method will only be returned as
// a result of using runtime/debug.SetPanicOnFault.
func (e errorAddressString) Addr() uintptr {
return e.addr
}
*/
// plainError represents a runtime error described by a string without
// the prefix "runtime error: " after invoking errorString.Error().
// See Issue #14965.
type plainError string
func (e plainError) RuntimeError() {}
func (e plainError) Error() string {
return string(e)
}
/*
// A boundsError represents an indexing or slicing operation gone wrong.
type boundsError struct {
x int64
y int
// Values in an index or slice expression can be signed or unsigned.
// That means we'd need 65 bits to encode all possible indexes, from -2^63 to 2^64-1.
// Instead, we keep track of whether x should be interpreted as signed or unsigned.
// y is known to be nonnegative and to fit in an int.
signed bool
code boundsErrorCode
}
type boundsErrorCode uint8
const (
boundsIndex boundsErrorCode = iota // s[x], 0 <= x < len(s) failed
boundsSliceAlen // s[?:x], 0 <= x <= len(s) failed
boundsSliceAcap // s[?:x], 0 <= x <= cap(s) failed
boundsSliceB // s[x:y], 0 <= x <= y failed (but boundsSliceA didn't happen)
boundsSlice3Alen // s[?:?:x], 0 <= x <= len(s) failed
boundsSlice3Acap // s[?:?:x], 0 <= x <= cap(s) failed
boundsSlice3B // s[?:x:y], 0 <= x <= y failed (but boundsSlice3A didn't happen)
boundsSlice3C // s[x:y:?], 0 <= x <= y failed (but boundsSlice3A/B didn't happen)
boundsConvert // (*[x]T)(s), 0 <= x <= len(s) failed
// Note: in the above, len(s) and cap(s) are stored in y
)
// boundsErrorFmts provide error text for various out-of-bounds panics.
// Note: if you change these strings, you should adjust the size of the buffer
// in boundsError.Error below as well.
var boundsErrorFmts = [...]string{
boundsIndex: "index out of range [%x] with length %y",
boundsSliceAlen: "slice bounds out of range [:%x] with length %y",
boundsSliceAcap: "slice bounds out of range [:%x] with capacity %y",
boundsSliceB: "slice bounds out of range [%x:%y]",
boundsSlice3Alen: "slice bounds out of range [::%x] with length %y",
boundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y",
boundsSlice3B: "slice bounds out of range [:%x:%y]",
boundsSlice3C: "slice bounds out of range [%x:%y:]",
boundsConvert: "cannot convert slice with length %y to array or pointer to array with length %x",
}
// boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y.
var boundsNegErrorFmts = [...]string{
boundsIndex: "index out of range [%x]",
boundsSliceAlen: "slice bounds out of range [:%x]",
boundsSliceAcap: "slice bounds out of range [:%x]",
boundsSliceB: "slice bounds out of range [%x:]",
boundsSlice3Alen: "slice bounds out of range [::%x]",
boundsSlice3Acap: "slice bounds out of range [::%x]",
boundsSlice3B: "slice bounds out of range [:%x:]",
boundsSlice3C: "slice bounds out of range [%x::]",
}
func (e boundsError) RuntimeError() {}
func appendIntStr(b []byte, v int64, signed bool) []byte {
if signed && v < 0 {
b = append(b, '-')
v = -v
}
var buf [20]byte
b = append(b, itoa(buf[:], uint64(v))...)
return b
}
func (e boundsError) Error() string {
fmt := boundsErrorFmts[e.code]
if e.signed && e.x < 0 {
fmt = boundsNegErrorFmts[e.code]
}
// max message length is 99: "runtime error: slice bounds out of range [::%x] with capacity %y"
// x can be at most 20 characters. y can be at most 19.
b := make([]byte, 0, 100)
b = append(b, "runtime error: "...)
for i := 0; i < len(fmt); i++ {
c := fmt[i]
if c != '%' {
b = append(b, c)
continue
}
i++
switch fmt[i] {
case 'x':
b = appendIntStr(b, e.x, e.signed)
case 'y':
b = appendIntStr(b, int64(e.y), true)
}
}
return string(b)
}
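The formats above are exactly what an out-of-range panic prints; a quick way to see the boundsIndex case:

package main

import "fmt"

func main() {
	defer func() { fmt.Println(recover()) }()
	s := make([]int, 3)
	i := 5
	_ = s[i] // runtime error: index out of range [5] with length 3
}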
type stringer interface {
String() string
}
// printany prints an argument passed to panic.
// If panic is called with a value that has a String or Error method,
// it has already been converted into a string by preprintpanics.
func printany(i any) {
switch v := i.(type) {
case nil:
print("nil")
case bool:
print(v)
case int:
print(v)
case int8:
print(v)
case int16:
print(v)
case int32:
print(v)
case int64:
print(v)
case uint:
print(v)
case uint8:
print(v)
case uint16:
print(v)
case uint32:
print(v)
case uint64:
print(v)
case uintptr:
print(v)
case float32:
print(v)
case float64:
print(v)
case complex64:
print(v)
case complex128:
print(v)
case string:
print(v)
default:
printanycustomtype(i)
}
}
func printanycustomtype(i any) {
eface := efaceOf(&i)
typestring := toRType(eface._type).string()
switch eface._type.Kind_ {
case kindString:
print(typestring, `("`, *(*string)(eface.data), `")`)
case kindBool:
print(typestring, "(", *(*bool)(eface.data), ")")
case kindInt:
print(typestring, "(", *(*int)(eface.data), ")")
case kindInt8:
print(typestring, "(", *(*int8)(eface.data), ")")
case kindInt16:
print(typestring, "(", *(*int16)(eface.data), ")")
case kindInt32:
print(typestring, "(", *(*int32)(eface.data), ")")
case kindInt64:
print(typestring, "(", *(*int64)(eface.data), ")")
case kindUint:
print(typestring, "(", *(*uint)(eface.data), ")")
case kindUint8:
print(typestring, "(", *(*uint8)(eface.data), ")")
case kindUint16:
print(typestring, "(", *(*uint16)(eface.data), ")")
case kindUint32:
print(typestring, "(", *(*uint32)(eface.data), ")")
case kindUint64:
print(typestring, "(", *(*uint64)(eface.data), ")")
case kindUintptr:
print(typestring, "(", *(*uintptr)(eface.data), ")")
case kindFloat32:
print(typestring, "(", *(*float32)(eface.data), ")")
case kindFloat64:
print(typestring, "(", *(*float64)(eface.data), ")")
case kindComplex64:
print(typestring, *(*complex64)(eface.data))
case kindComplex128:
print(typestring, *(*complex128)(eface.data))
default:
print("(", typestring, ") ", eface.data)
}
}
// panicwrap generates a panic for a call to a wrapped value method
// with a nil pointer receiver.
//
// It is called from the generated wrapper code.
func panicwrap() {
pc := getcallerpc()
name := funcNameForPrint(funcname(findfunc(pc)))
// name is something like "main.(*T).F".
// We want to extract pkg ("main"), typ ("T"), and meth ("F").
// Do it by finding the parens.
i := bytealg.IndexByteString(name, '(')
if i < 0 {
throw("panicwrap: no ( in " + name)
}
pkg := name[:i-1]
if i+2 >= len(name) || name[i-1:i+2] != ".(*" {
throw("panicwrap: unexpected string after package name: " + name)
}
name = name[i+2:]
i = bytealg.IndexByteString(name, ')')
if i < 0 {
throw("panicwrap: no ) in " + name)
}
if i+2 >= len(name) || name[i:i+2] != ")." {
throw("panicwrap: unexpected string after type name: " + name)
}
typ := name[:i]
meth := name[i+2:]
panic(plainError("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer"))
}
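panicwrap's message can be provoked from user code by calling a value method through a nil pointer; the compiler-generated pointer wrapper is what ends up calling it:

package main

import "fmt"

type T struct{}

func (T) F() {} // value receiver: a *T call goes through a generated wrapper

func main() {
	defer func() { fmt.Println(recover()) }() // value method main.T.F called using nil *T pointer
	var p *T
	p.F()
}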
*/


@@ -1,343 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"unsafe"
)
const (
bigAlloc = 1 << (goarchPtrSize*8 - 6)
)
// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
return AllocZ(typ.Size_)
}
/*
//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
return mallocgc(typ.Size_, typ, true)
}
//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
return mallocgc(typ.Size_, typ, true)
}
*/
const mathMaxUintptr = ^uintptr(0)
// mathMulUintptr returns a * b and whether the multiplication overflowed.
// On supported platforms this is an intrinsic lowered by the compiler.
func mathMulUintptr(a, b uintptr) (uintptr, bool) {
if a|b < 1<<(4*goarchPtrSize) || a == 0 {
return a * b, false
}
overflow := b > mathMaxUintptr/a
return a * b, overflow
}
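mathMulUintptr's fast path skips the division when both operands fit in half a word, since the product then cannot overflow. The same check written out for uint64:

package main

import (
	"fmt"
	"math"
)

// mul64 reports a*b and whether it overflowed, mirroring mathMulUintptr:
// small operands take the fast path, otherwise compare b against max/a.
func mul64(a, b uint64) (uint64, bool) {
	if a|b < 1<<32 || a == 0 {
		return a * b, false
	}
	return a * b, b > math.MaxUint64/a
}

func main() {
	fmt.Println(mul64(3, 4))         // 12 false
	fmt.Println(mul64(1<<33, 1<<33)) // 0 true: the product wrapped
}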
// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
if n == 1 {
return AllocZ(typ.Size_)
}
mem, overflow := mathMulUintptr(typ.Size_, uintptr(n))
if overflow || n < 0 {
panic(plainError("runtime: allocation size out of range"))
}
return AllocZ(mem)
}
/*
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
return newarray(typ, n)
}
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
c := getMCache(mp)
if c == nil {
throw("profilealloc called without a P or outside bootstrapping")
}
c.nextSample = nextSample()
mProf_Malloc(x, size)
}
// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows the exponential
// distribution (exp(MemProfileRate)), so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() uintptr {
if MemProfileRate == 1 {
// Callers assign our return value to
// mcache.next_sample, but next_sample is not used
// when the rate is 1. So avoid the math below and
// just return something.
return 0
}
if GOOS == "plan9" {
// Plan 9 doesn't support floating point in note handler.
if gp := getg(); gp == gp.m.gsignal {
return nextSampleNoFP()
}
}
return uintptr(fastexprand(MemProfileRate))
}
// fastexprand returns a random number from an exponential distribution with
// the specified mean.
func fastexprand(mean int) int32 {
// Avoid overflow. Maximum possible step is
// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
switch {
case mean > 0x7000000:
mean = 0x7000000
case mean == 0:
return 0
}
// Take a random sample of the exponential distribution exp(-mean*x).
// The probability distribution function is mean*exp(-mean*x), so the CDF is
// p = 1 - exp(-mean*x), so
// q = 1 - p == exp(-mean*x)
// log_e(q) = -mean*x
// -log_e(q)/mean = x
// x = -log_e(q) * mean
// x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
const randomBitCount = 26
q := fastrandn(1<<randomBitCount) + 1
qlog := fastlog2(float64(q)) - randomBitCount
if qlog > 0 {
qlog = 0
}
const minusLog2 = -0.6931471805599453 // -ln(2)
return int32(qlog*(minusLog2*float64(mean))) + 1
}
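The fixed-point derivation in fastexprand's comment is ordinary inverse-CDF sampling. A floating-point sketch of the idea it approximates with log2 and 26 random bits (the mean below is just an illustrative value):

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// expSample draws from an exponential distribution with the given mean:
// q is uniform in (0, 1], and x = -ln(q) * mean inverts the CDF.
func expSample(mean float64) float64 {
	q := 1 - rand.Float64() // in (0, 1]
	return -math.Log(q) * mean
}

func main() {
	const mean = 512 * 1024
	sum := 0.0
	for i := 0; i < 200000; i++ {
		sum += expSample(mean)
	}
	fmt.Printf("observed mean ≈ %.0f (want ≈ %d)\n", sum/200000, int(mean))
}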
// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() uintptr {
// Set first allocation sample size.
rate := MemProfileRate
if rate > 0x3fffffff { // make 2*rate not overflow
rate = 0x3fffffff
}
if rate != 0 {
return uintptr(fastrandn(uint32(2 * rate)))
}
return 0
}
type persistentAlloc struct {
base *notInHeap
off uintptr
}
var globalAlloc struct {
mutex
persistentAlloc
}
// persistentChunkSize is the number of bytes we allocate when we grow
// a persistentAlloc.
const persistentChunkSize = 256 << 10
// persistentChunks is a list of all the persistent chunks we have
// allocated. The list is maintained through the first word in the
// persistent chunk. This is updated atomically.
var persistentChunks *notInHeap
// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
// sysStat must be non-nil.
//
// Consider marking persistentalloc'd types not in heap by embedding
// runtime/internal/sys.NotInHeap.
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
var p *notInHeap
systemstack(func() {
p = persistentalloc1(size, align, sysStat)
})
return unsafe.Pointer(p)
}
// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
const (
maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
)
if size == 0 {
throw("persistentalloc: size == 0")
}
if align != 0 {
if align&(align-1) != 0 {
throw("persistentalloc: align is not a power of 2")
}
if align > _PageSize {
throw("persistentalloc: align is too large")
}
} else {
align = 8
}
if size >= maxBlock {
return (*notInHeap)(sysAlloc(size, sysStat))
}
mp := acquirem()
var persistent *persistentAlloc
if mp != nil && mp.p != 0 {
persistent = &mp.p.ptr().palloc
} else {
lock(&globalAlloc.mutex)
persistent = &globalAlloc.persistentAlloc
}
persistent.off = alignUp(persistent.off, align)
if persistent.off+size > persistentChunkSize || persistent.base == nil {
persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
if persistent.base == nil {
if persistent == &globalAlloc.persistentAlloc {
unlock(&globalAlloc.mutex)
}
throw("runtime: cannot allocate memory")
}
// Add the new chunk to the persistentChunks list.
for {
chunks := uintptr(unsafe.Pointer(persistentChunks))
*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
break
}
}
persistent.off = alignUp(goarch.PtrSize, align)
}
p := persistent.base.add(persistent.off)
persistent.off += size
releasem(mp)
if persistent == &globalAlloc.persistentAlloc {
unlock(&globalAlloc.mutex)
}
if sysStat != &memstats.other_sys {
sysStat.add(int64(size))
memstats.other_sys.add(-int64(size))
}
return p
}
// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
for chunk != 0 {
if p >= chunk && p < chunk+persistentChunkSize {
return true
}
chunk = *(*uintptr)(unsafe.Pointer(chunk))
}
return false
}
// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then optionally maps that region into the Ready state
// as needed.
//
// The caller is responsible for locking.
type linearAlloc struct {
next uintptr // next free byte
mapped uintptr // one byte past end of mapped space
end uintptr // end of reserved space
mapMemory bool // transition memory from Reserved to Ready if true
}
func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
if base+size < base {
// Chop off the last byte. The runtime isn't prepared
// to deal with situations where the bounds could overflow.
// Leave that memory reserved, though, so we don't map it
// later.
size -= 1
}
l.next, l.mapped = base, base
l.end = base + size
l.mapMemory = mapMemory
}
func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
p := alignUp(l.next, align)
if p+size > l.end {
return nil
}
l.next = p + size
if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
if l.mapMemory {
// Transition from Reserved to Prepared to Ready.
n := pEnd - l.mapped
sysMap(unsafe.Pointer(l.mapped), n, sysStat)
sysUsed(unsafe.Pointer(l.mapped), n, n)
}
l.mapped = pEnd
}
return unsafe.Pointer(p)
}
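linearAlloc.alloc is align-then-bump. The same logic over a plain byte slice, without the Reserved/Prepared/Ready mapping:

package main

import "fmt"

// alignUp rounds n up to a multiple of a, where a is a power of two.
func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

type bumpAlloc struct {
	buf  []byte
	next uintptr // next free byte, like linearAlloc.next
}

// alloc aligns the cursor, checks against the end, and bumps.
func (l *bumpAlloc) alloc(size, align uintptr) []byte {
	p := alignUp(l.next, align)
	if p+size > uintptr(len(l.buf)) {
		return nil // out of reserved space
	}
	l.next = p + size
	return l.buf[p : p+size]
}

func main() {
	l := &bumpAlloc{buf: make([]byte, 64)}
	a := l.alloc(10, 8)
	b := l.alloc(10, 8)
	fmt.Println(len(a), len(b), l.next) // 10 10 26: second block starts at offset 16
}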
// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
// In general, it's better to use real types which embed
// runtime/internal/sys.NotInHeap, but this serves as a generic type
// for situations where that isn't possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
type notInHeap struct{ _ sys.NotInHeap }
func (p *notInHeap) add(bytes uintptr) *notInHeap {
return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}
// computeRZlog computes the size of the redzone.
// Refer to the implementation of the compiler-rt.
func computeRZlog(userSize uintptr) uintptr {
switch {
case userSize <= (64 - 16):
return 16 << 0
case userSize <= (128 - 32):
return 16 << 1
case userSize <= (512 - 64):
return 16 << 2
case userSize <= (4096 - 128):
return 16 << 3
case userSize <= (1<<14)-256:
return 16 << 4
case userSize <= (1<<15)-512:
return 16 << 5
case userSize <= (1<<16)-1024:
return 16 << 6
default:
return 16 << 7
}
}
*/


@@ -59,12 +59,6 @@ import (
 	"github.com/goplus/llgo/internal/abi"
 )

-type maptype = abi.MapType
-
-const (
-	goarchPtrSize = unsafe.Sizeof(uintptr(0))
-)
-
 const (
 	// Maximum number of key/elem pairs a bucket can hold.
 	bucketCntBits = abi.MapBucketCountBits
@@ -80,9 +74,8 @@ const (
 	// Must fit in a uint8.
 	// Fast versions cannot handle big elems - the cutoff size for
 	// fast versions in cmd/compile/internal/gc/walk.go must be at most this elem.
 	// maxKeySize = abi.MapMaxKeyBytes
-	// maxElemSize = abi.MapMaxElemBytes
+	maxElemSize = abi.MapMaxElemBytes

 	// data offset should be the size of the bmap struct, but needs to be
 	// aligned correctly. For amd64p32 this means 64-bit alignment
@@ -186,12 +179,11 @@ type hiter struct {
 	bucket      uintptr
 	checkBucket uintptr
 }
-*/

 // bucketShift returns 1<<b, optimized for code generation.
 func bucketShift(b uint8) uintptr {
 	// Masking the shift amount allows overflow checks to be elided.
-	return uintptr(1) << (b & uint8(goarchPtrSize*8-1))
+	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
 }

 // bucketMask returns 1<<b - 1, optimized for code generation.
@@ -201,7 +193,7 @@ func bucketMask(b uint8) uintptr {
 // tophash calculates the tophash value for hash.
 func tophash(hash uintptr) uint8 {
-	top := uint8(hash >> (goarchPtrSize*8 - 8))
+	top := uint8(hash >> (goarch.PtrSize*8 - 8))
 	if top < minTopHash {
 		top += minTopHash
 	}
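bucketShift and tophash are easy to exercise standalone. A sketch assuming a 64-bit word and the usual minTopHash of 5 (values below it are reserved for bucket cell states):

package main

import "fmt"

const (
	ptrSize    = 8 // assuming a 64-bit platform
	minTopHash = 5 // smaller values mark empty/evacuated cells
)

func bucketShift(b uint8) uintptr { return uintptr(1) << (b & (ptrSize*8 - 1)) }

func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

func main() {
	fmt.Println(bucketShift(3))                      // 8 buckets for b=3
	fmt.Printf("%#x\n", tophash(0xab00000000000000)) // 0xab, the hash's top byte
	fmt.Println(tophash(0x01))                       // 5: bumped into the valid range
}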
@@ -214,18 +206,16 @@ func evacuated(b *bmap) bool {
 }

 func (b *bmap) overflow(t *maptype) *bmap {
-	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarchPtrSize))
+	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
 }

 func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
-	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarchPtrSize)) = ovf
+	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
 }

-/*
 func (b *bmap) keys() unsafe.Pointer {
 	return add(unsafe.Pointer(b), dataOffset)
 }
-*/

 // incrnoverflow increments h.noverflow.
 // noverflow counts the number of overflow buckets.
@@ -290,7 +280,6 @@ func (h *hmap) createOverflow() {
 	}
 }

-/*
 func makemap64(t *maptype, hint int64, h *hmap) *hmap {
 	if int64(int(hint)) != hint {
 		hint = 0
@@ -308,14 +297,15 @@ func makemap_small() *hmap {
 	return h
 }
-*/

 // makemap implements Go map creation for make(map[k]v, hint).
 // If the compiler has determined that the map or the first bucket
 // can be created on the stack, h and/or bucket may be non-nil.
 // If h != nil, the map can be created directly in h.
 // If h.buckets != nil, bucket pointed to can be used as the first bucket.
 func makemap(t *maptype, hint int, h *hmap) *hmap {
-	mem, overflow := mathMulUintptr(uintptr(hint), t.Bucket.Size_)
-	if overflow || mem > bigAlloc {
+	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
+	if overflow || mem > maxAlloc {
 		hint = 0
 	}
@@ -399,7 +389,6 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
 	return buckets, nextOverflow
 }

-/*
 // mapaccess1 returns a pointer to h[key]. Never returns nil, instead
 // it will return a reference to the zero object for the elem type if
 // the key is not in the map.
@@ -586,13 +575,24 @@ func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Point
 	}
 	return e, true
 }
-*/

 // Like mapaccess, but allocates a slot for the key if it is not present in the map.
 func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 	if h == nil {
 		panic(plainError("assignment to entry in nil map"))
 	}
+	if raceenabled {
+		callerpc := getcallerpc()
+		pc := abi.FuncPCABIInternal(mapassign)
+		racewritepc(unsafe.Pointer(h), callerpc, pc)
+		raceReadObjectPC(t.Key, key, callerpc, pc)
+	}
+	if msanenabled {
+		msanread(key, t.Key.Size_)
+	}
+	if asanenabled {
+		asanread(key, t.Key.Size_)
+	}
 	if h.flags&hashWriting != 0 {
 		fatal("concurrent map writes")
 	}
@@ -694,7 +694,6 @@ done:
 	return elem
 }
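mapassign, shown above with its race/msan/asan instrumentation restored, is the slot-returning half of map assignment: the compiler lowers m[k] = v to a mapassign call plus a store through the returned pointer. From user code that is simply:

package main

import "fmt"

func main() {
	m := make(map[int]int) // ≈ makemap_small / makemap
	m[23] = 100            // ≈ p := mapassign(t, h, &k); *p = 100
	m[7] = 29
	fmt.Println(m[23], m[7]) // 100 29
}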
-/*
 func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
 	if raceenabled && h != nil {
 		callerpc := getcallerpc()
@@ -1056,7 +1055,6 @@ func mapclear(t *maptype, h *hmap) {
 	}
 	h.flags &^= hashWriting
 }
-*/

 func hashGrow(t *maptype, h *hmap) {
 	// If we've hit the load factor, get bigger.
@@ -1307,7 +1305,6 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
 	}
 }

-/*
 // Reflect stubs. Called from ../reflect/asm_*.s

 //go:linkname reflect_makemap reflect.makemap


@@ -1,349 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector: write barriers.
//
// For the concurrent garbage collector, the Go compiler implements
// updates to pointer-valued fields that may be in heap objects by
// emitting calls to write barriers. The main write barrier for
// individual pointer writes is gcWriteBarrier and is implemented in
// assembly. This file contains write barrier entry points for bulk
// operations. See also mwbbuf.go.
package runtime
import (
"unsafe"
"github.com/goplus/llgo/internal/abi"
)
/*
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"unsafe"
)
// Go uses a hybrid barrier that combines a Yuasa-style deletion
// barrier—which shades the object whose reference is being
// overwritten—with Dijkstra insertion barrier—which shades the object
// whose reference is being written. The insertion part of the barrier
// is necessary while the calling goroutine's stack is grey. In
// pseudocode, the barrier is:
//
// writePointer(slot, ptr):
// shade(*slot)
// if current stack is grey:
// shade(ptr)
// *slot = ptr
//
// slot is the destination in Go code.
// ptr is the value that goes into the slot in Go code.
//
// Shade indicates that it has seen a white pointer by adding the referent
// to wbuf as well as marking it.
//
// The two shades and the condition work together to prevent a mutator
// from hiding an object from the garbage collector:
//
// 1. shade(*slot) prevents a mutator from hiding an object by moving
// the sole pointer to it from the heap to its stack. If it attempts
// to unlink an object from the heap, this will shade it.
//
// 2. shade(ptr) prevents a mutator from hiding an object by moving
// the sole pointer to it from its stack into a black object in the
// heap. If it attempts to install the pointer into a black object,
// this will shade it.
//
// 3. Once a goroutine's stack is black, the shade(ptr) becomes
// unnecessary. shade(ptr) prevents hiding an object by moving it from
// the stack to the heap, but this requires first having a pointer
// hidden on the stack. Immediately after a stack is scanned, it only
// points to shaded objects, so it's not hiding anything, and the
// shade(*slot) prevents it from hiding any other pointers on its
// stack.
//
// For a detailed description of this barrier and proof of
// correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
//
//
//
// Dealing with memory ordering:
//
// Both the Yuasa and Dijkstra barriers can be made conditional on the
// color of the object containing the slot. We chose not to make these
// conditional because the cost of ensuring that the object holding
// the slot doesn't concurrently change color without the mutator
// noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
// a slot and then loads the slot's mark bit while the GC thread
// writes to the slot's mark bit and then as part of scanning reads
// the slot.
//
// Initially both [slot] and [slotmark] are 0 (nil)
// Mutator thread GC thread
// st [slot], ptr st [slotmark], 1
//
// ld r1, [slotmark] ld r2, [slot]
//
// Without an expensive memory barrier between the st and the ld, the final
// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
// example of what can happen when loads are allowed to be reordered with older
// stores (avoiding such reorderings lies at the heart of the classic
// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
// barriers, which will slow down both the mutator and the GC, we always grey
// the ptr object regardless of the slot's color.
//
// Another place where we intentionally omit memory barriers is when
// accessing mheap_.arena_used to check if a pointer points into the
// heap. On relaxed memory machines, it's possible for a mutator to
// extend the size of the heap by updating arena_used, allocate an
// object from this new region, and publish a pointer to that object,
// but for tracing running on another processor to observe the pointer
// but use the old value of arena_used. In this case, tracing will not
// mark the object, even though it's reachable. However, the mutator
// is guaranteed to execute a write barrier when it publishes the
// pointer, so it will take care of marking the object. A general
// consequence of this is that the garbage collector may cache the
// value of mheap_.arena_used. (See issue #9984.)
//
//
// Stack writes:
//
// The compiler omits write barriers for writes to the current frame,
// but if a stack pointer has been passed down the call stack, the
// compiler will generate a write barrier for writes through that
// pointer (because it doesn't know it's not a heap pointer).
//
//
// Global writes:
//
// The Go garbage collector requires write barriers when heap pointers
// are stored in globals. Many garbage collectors ignore writes to
// globals and instead pick up global -> heap pointers during
// termination. This increases pause time, so we instead rely on write
// barriers for writes to globals so that we don't have to rescan
// global during mark termination.
//
//
// Publication ordering:
//
// The write barrier is *pre-publication*, meaning that the write
// barrier happens prior to the *slot = ptr write that may make ptr
// reachable by some goroutine that currently cannot reach it.
//
//
// Signal handler pointer writes:
//
// In general, the signal handler cannot safely invoke the write
// barrier because it may run without a P or even during the write
// barrier.
//
// There is exactly one exception: profbuf.go omits a barrier during
// signal handler profile logging. That's safe only because of the
// deletion barrier. See profbuf.go for a detailed argument. If we
// remove the deletion barrier, we'll have to work out a new way to
// handle the profile logging.
*/
// typedmemmove copies a value of type typ to dst from src.
// Must be nosplit, see #16026.
//
// TODO: Perfect for go:nosplitrec since we can't have a safe point
// anywhere in the bulk barrier or memmove.
//
//go:nosplit
func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) {
if dst == src {
return
}
// There's a race here: if some other goroutine can write to
// src, it may change some pointer in src after we've
// performed the write barrier but before we perform the
// memory copy. This is safe because the write performed by that
// other goroutine must also be accompanied by a write
// barrier, so at worst we've unnecessarily greyed the old
// pointer that was in src.
memmove(dst, src, typ.Size_)
}
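At the language level, typedmemmove is the machinery behind typed copies such as copy and struct assignment, and overlapping ranges keep memmove semantics:

package main

import "fmt"

func main() {
	s := []int{1, 2, 3, 4, 5}
	copy(s[1:], s[:4]) // overlapping forward copy, memmove-style semantics
	fmt.Println(s)     // [1 1 2 3 4]
}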
/*
// wbZero performs the write barrier operations necessary before
// zeroing a region of memory at address dst of type typ.
// Does not actually do the zeroing.
//
//go:nowritebarrierrec
//go:nosplit
func wbZero(typ *_type, dst unsafe.Pointer) {
bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes)
}
// wbMove performs the write barrier operations necessary before
// copying a region of memory from src to dst of type typ.
// Does not actually do the copying.
//
//go:nowritebarrierrec
//go:nosplit
func wbMove(typ *_type, dst, src unsafe.Pointer) {
bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
}
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
if raceenabled {
raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
}
if msanenabled {
msanwrite(dst, typ.Size_)
msanread(src, typ.Size_)
}
if asanenabled {
asanwrite(dst, typ.Size_)
asanread(src, typ.Size_)
}
typedmemmove(typ, dst, src)
}
//go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove
func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
reflect_typedmemmove(typ, dst, src)
}
// reflectcallmove is invoked by reflectcall to copy the return values
// out of the stack and into the heap, invoking the necessary write
// barriers. dst, src, and size describe the return value area to
// copy. typ describes the entire frame (not just the return values).
// typ may be nil, which indicates write barriers are not needed.
//
// It must be nosplit and must only call nosplit functions because the
// stack map of reflectcall is wrong.
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
if writeBarrier.needed && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
}
memmove(dst, src, size)
// Move pointers returned in registers to a place where the GC can see them.
for i := range regs.Ints {
if regs.ReturnIsPtr.Get(i) {
regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
}
}
}
//go:nosplit
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
n := dstLen
if n > srcLen {
n = srcLen
}
if n == 0 {
return 0
}
// The compiler emits calls to typedslicecopy before
// instrumentation runs, so unlike the other copying and
// assignment operations, it's not instrumented in the calling
// code and needs its own instrumentation.
if raceenabled {
callerpc := getcallerpc()
pc := abi.FuncPCABIInternal(slicecopy)
racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
}
if msanenabled {
msanwrite(dstPtr, uintptr(n)*typ.Size_)
msanread(srcPtr, uintptr(n)*typ.Size_)
}
if asanenabled {
asanwrite(dstPtr, uintptr(n)*typ.Size_)
asanread(srcPtr, uintptr(n)*typ.Size_)
}
if goexperiment.CgoCheck2 {
cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
}
if dstPtr == srcPtr {
return n
}
// Note: No point in checking typ.PtrBytes here:
// compiler only emits calls to typedslicecopy for types with pointers,
// and growslice and reflect_typedslicecopy check for pointers
// before calling typedslicecopy.
size := uintptr(n) * typ.Size_
if writeBarrier.needed {
pwsize := size - typ.Size_ + typ.PtrBytes
bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
}
// See typedmemmove for a discussion of the race between the
// barrier and memmove.
memmove(dstPtr, srcPtr, size)
return n
}
//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
if elemType.PtrBytes == 0 {
return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
}
return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
}
// typedmemclr clears the typed memory at ptr with type typ. The
// memory at ptr must already be initialized (and hence in type-safe
// state). If the memory is being initialized for the first time, see
// memclrNoHeapPointers.
//
// If the caller knows that typ has pointers, it can alternatively
// call memclrHasPointers.
//
// TODO: A "go:nosplitrec" annotation would be perfect for this.
//
//go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer) {
if writeBarrier.needed && typ.PtrBytes != 0 {
bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes)
}
memclrNoHeapPointers(ptr, typ.Size_)
}
//go:linkname reflect_typedmemclr reflect.typedmemclr
func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
typedmemclr(typ, ptr)
}
//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
if writeBarrier.needed && typ.PtrBytes != 0 {
bulkBarrierPreWrite(uintptr(ptr), 0, size)
}
memclrNoHeapPointers(ptr, size)
}
//go:linkname reflect_typedarrayclear reflect.typedarrayclear
func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
size := typ.Size_ * uintptr(len)
if writeBarrier.needed && typ.PtrBytes != 0 {
bulkBarrierPreWrite(uintptr(ptr), 0, size)
}
memclrNoHeapPointers(ptr, size)
}
*/
// memclrHasPointers clears n bytes of typed memory starting at ptr.
// The caller must ensure that the type of the object at ptr has
// pointers, usually by checking typ.PtrBytes. However, ptr
// does not have to point to the start of the allocation.
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
bulkBarrierPreWrite(uintptr(ptr), 0, n)
memclrNoHeapPointers(ptr, n)
}

File diff suppressed because it is too large.


@@ -1,29 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Malloc small size classes.
//
// See malloc.go for overview.
// See also mksizeclasses.go for how we decide what size classes to use.
package runtime
// Returns size of the memory block that mallocgc will allocate if you ask for the size.
func roundupsize(size uintptr) uintptr {
return size
}
/* if size < _MaxSmallSize {
if size <= smallSizeMax-8 {
return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])
} else {
return uintptr(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]])
}
}
if size+_PageSize < size {
return size
}
return alignUp(size, _PageSize)
}
*/
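With size classes stubbed out, roundupsize is the identity; the commented-out fast path rounds a request up to its size class and large requests up to a page boundary. The underlying primitive is the usual power-of-two alignUp:

package main

import "fmt"

// alignUp rounds n up to a multiple of a, where a is a power of two.
func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

func main() {
	const pageSize = 8192 // a typical runtime page size, for illustration
	fmt.Println(alignUp(13, 16))         // 16
	fmt.Println(alignUp(8193, pageSize)) // 16384
}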


@@ -1052,14 +1052,21 @@ func sync_throw(s string) {
 func sync_fatal(s string) {
 	fatal(s)
 }
-*/

 // throw triggers a fatal error that dumps a stack trace and exits.
 //
 // throw should be used for runtime-internal fatal errors where Go itself,
 // rather than user code, may be at fault for the failure.
+//
+//go:nosplit
 func throw(s string) {
-	fatal(s)
+	// Everything throw does should be recursively nosplit so it
+	// can be called even when it's unsafe to grow the stack.
+	systemstack(func() {
+		print("fatal error: ", s, "\n")
+	})
+	fatalthrow(throwTypeRuntime)
 }

 // fatal triggers a fatal error that dumps a stack trace and exits.
@@ -1069,11 +1076,10 @@ func throw(s string) {
 //
 // fatal does not include runtime frames, system goroutines, or frame metadata
 // (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
+//
+//go:nosplit
 func fatal(s string) {
-	panic("fatal error: " + s)
-}
-
-/* // Everything throw does should be recursively nosplit so it
+	// Everything fatal does should be recursively nosplit so it
 	// can be called even when it's unsafe to grow the stack.
 	systemstack(func() {
 		print("fatal error: ", s, "\n")
@@ -1082,7 +1088,6 @@ func fatal(s string) {
 	fatalthrow(throwTypeUser)
 }

-/*
 // runningPanicDefers is non-zero while running deferred functions for panic.
 // This is used to try hard to get a panic stack trace out when exiting.
 var runningPanicDefers atomic.Uint32
@@ -1134,7 +1139,17 @@ func recovery(gp *g) {
 // fatalthrow implements an unrecoverable runtime throw. It freezes the
 // system, prints stack traces starting from its caller, and terminates the
 // process.
+//
+//go:nosplit
 func fatalthrow(t throwType) {
+	pc := getcallerpc()
+	sp := getcallersp()
+	gp := getg()
+
+	if gp.m.throwing == throwTypeNone {
+		gp.m.throwing = t
+	}
+
 	// Switch to the system stack to avoid any stack growth, which may make
 	// things worse if the runtime is in a bad state.
 	systemstack(func() {
@@ -1157,7 +1172,6 @@ func fatalthrow(t throwType) {
 	*(*int)(nil) = 0 // not reached
 }

-/*
 // fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
 // that if msgs != nil, fatalpanic also prints panic messages and decrements
 // runningPanicDefers once main is blocked from exiting.

View File

@@ -4,148 +4,27 @@
 package runtime
 
-import (
-	"unsafe"
-
-	"github.com/goplus/llgo/c"
-)
+import _ "unsafe"
 
-// Should be a built-in for unsafe.Pointer?
-//
-//go:linkname add llgo.advance
-func add(p unsafe.Pointer, x uintptr) unsafe.Pointer
/*
// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g->sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
func mcall(fn func(*g))
// systemstack runs fn on a system stack.
// If systemstack is called from the per-OS-thread (g0) stack, or
// if systemstack is called from the signal handling (gsignal) stack,
// systemstack calls fn directly and returns.
// Otherwise, systemstack is being called from the limited stack
// of an ordinary goroutine. In this case, systemstack switches
// to the per-OS-thread stack, calls fn, and switches back.
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to system stack:
//
// ... set up y ...
// systemstack(func() {
// x = bigcall(y)
// })
// ... use x ...
//
//go:noescape
func systemstack(fn func())
//go:nosplit
//go:nowritebarrierrec
func badsystemstack() {
writeErrStr("fatal: systemstack called from unexpected goroutine")
}
*/
// memclrNoHeapPointers clears n bytes starting at ptr.
//
// Usually you should use typedmemclr. memclrNoHeapPointers should be
// used only when the caller knows that *ptr contains no heap pointers
// because either:
//
// *ptr is initialized memory and its type is pointer-free, or
//
// *ptr is uninitialized memory (e.g., memory that's being reused
// for a new allocation) and hence contains only "junk".
//
// memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
// is a multiple of the pointer size, then any pointer-aligned,
// pointer-sized portion is cleared atomically. Despite the function
// name, this is necessary because this function is the underlying
// implementation of typedmemclr and memclrHasPointers. See the doc of
// memmove for more details.
//
// The (CPU-specific) implementations of this function are in memclr_*.s.
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
c.Memset(ptr, 0, n)
}
// Zeroinit initializes memory to zero.
func Zeroinit(p unsafe.Pointer, size uintptr) unsafe.Pointer {
return c.Memset(p, 0, size)
}
/*
//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
memclrNoHeapPointers(ptr, n)
}
*/
// memmove copies n bytes from "from" to "to".
//
// memmove ensures that any pointer in "from" is written to "to" with
// an indivisible write, so that racy reads cannot observe a
// half-written pointer. This is necessary to prevent the garbage
// collector from observing invalid pointers, and differs from memmove
// in unmanaged languages. However, memmove is only required to do
// this if "from" and "to" may contain pointers, which can only be the
// case if "from", "to", and "n" are all be word-aligned.
//
//go:linkname memmove C.memmove
func memmove(to, from unsafe.Pointer, n uintptr)
/*
// Outside assembly calls memmove. Make sure it has ABI wrappers.
//
//go:linkname memmove
//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
memmove(to, from, n)
}
// exported value for testing
const hashLoad = float32(loadFactorNum) / float32(loadFactorDen)
*/
 //go:linkname fastrand C.rand
 func fastrand() uint32
 
-/*
-//go:nosplit
-func fastrandn(n uint32) uint32 {
-	// This is similar to fastrand() % n, but faster.
-	// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
-	return uint32(uint64(fastrand()) * uint64(n) >> 32)
-}
-
-func fastrand64() uint64 {
+/* TODO(xsw):
+func fastrand() uint32 {
 	mp := getg().m
 	// Implement wyrand: https://github.com/wangyi-fudan/wyhash
 	// Only the platform that math.Mul64 can be lowered
 	// by the compiler should be in this list.
 	if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
 		goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
-		goarch.IsS390x|goarch.IsRiscv64 == 1 {
+		goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 {
 		mp.fastrand += 0xa0761d6478bd642f
 		hi, lo := math.Mul64(mp.fastrand, mp.fastrand^0xe7037ed1a0b428db)
-		return hi ^ lo
+		return uint32(hi ^ lo)
 	}
 	// Implement xorshift64+: 2 32-bit xorshift sequences added together.
+	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
 	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
 	// This generator passes the SmallCrush suite, part of TestU01 framework:
 	// http://simul.iro.umontreal.ca/testu01/tu01.html
@@ -153,324 +32,7 @@ func fastrand64() uint64 {
 	s1, s0 := t[0], t[1]
 	s1 ^= s1 << 17
 	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
-	r := uint64(s0 + s1)
-	s0, s1 = s1, s0
-	s1 ^= s1 << 17
-	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
-	r += uint64(s0+s1) << 32
 	t[0], t[1] = s0, s1
-	return r
+	return s0 + s1
 }
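Aside on the deleted fastrandn above: it uses Lemire's multiply-shift reduction (multiply a uniform 32-bit value by n, keep the high 32 bits), which matches the distribution of `fastrand() % n` while avoiding the divide. A standalone check of the arithmetic:

```go
package main

import "fmt"

// fastrandn maps a uniform 32-bit x into [0, n) via the high half of
// the 64-bit product: floor(x*n / 2^32) is always less than n.
func fastrandn(x, n uint32) uint32 {
	return uint32(uint64(x) * uint64(n) >> 32)
}

func main() {
	fmt.Println(fastrandn(0, 10))          // 0: smallest input maps to 0
	fmt.Println(fastrandn(1<<31, 10))      // 5: the halfway point maps to n/2
	fmt.Println(fastrandn(0xFFFFFFFF, 10)) // 9: the result never reaches n
}
```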
func fastrandu() uint {
if goarch.PtrSize == 4 {
return uint(fastrand())
}
return uint(fastrand64())
}
//go:linkname rand_fastrand64 math/rand.fastrand64
func rand_fastrand64() uint64 { return fastrand64() }
//go:linkname sync_fastrandn sync.fastrandn
func sync_fastrandn(n uint32) uint32 { return fastrandn(n) }
//go:linkname net_fastrandu net.fastrandu
func net_fastrandu() uint { return fastrandu() }
//go:linkname os_fastrand os.fastrand
func os_fastrand() uint32 { return fastrand() }
// in internal/bytealg/equal_*.s
//
//go:noescape
func memequal(a, b unsafe.Pointer, size uintptr) bool
// noescape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to zero instructions.
// USE CAREFULLY!
func noescape(p unsafe.Pointer) unsafe.Pointer {
x := uintptr(p)
return unsafe.Pointer(x ^ 0)
}
// noEscapePtr hides a pointer from escape analysis. See noescape.
// USE CAREFULLY!
//
//go:nosplit
func noEscapePtr[T any](p *T) *T {
x := uintptr(unsafe.Pointer(p))
return (*T)(unsafe.Pointer(x ^ 0))
}
// Not all cgocallback frames are actually cgocallback,
// so not all have these arguments. Mark them uintptr so that the GC
// does not misinterpret memory when the arguments are not present.
// cgocallback is not called from Go, only from crosscall2.
// This in turn calls cgocallbackg, which is where we'll find
// pointer-declared arguments.
//
// When fn is nil (frame is saved g), call dropm instead,
// this is used when the C thread is exiting.
func cgocallback(fn, frame, ctxt uintptr)
func gogo(buf *gobuf)
func asminit()
func setg(gg *g)
func breakpoint()
// reflectcall calls fn with arguments described by stackArgs, stackArgsSize,
// frameSize, and regArgs.
//
// Arguments passed on the stack and space for return values passed on the stack
// must be laid out at the space pointed to by stackArgs (with total length
// stackArgsSize) according to the ABI.
//
// stackRetOffset must be some value <= stackArgsSize that indicates the
// offset within stackArgs where the return value space begins.
//
// frameSize is the total size of the argument frame at stackArgs and must
// therefore be >= stackArgsSize. It must include additional space for spilling
// register arguments for stack growth and preemption.
//
// TODO(mknyszek): Once we don't need the additional spill space, remove frameSize,
// since frameSize will be redundant with stackArgsSize.
//
// Arguments passed in registers must be laid out in regArgs according to the ABI.
// regArgs will hold any return values passed in registers after the call.
//
// reflectcall copies stack arguments from stackArgs to the goroutine stack, and
// then copies back stackArgsSize-stackRetOffset bytes back to the return space
// in stackArgs once fn has completed. It also "unspills" argument registers from
// regArgs before calling fn, and spills them back into regArgs immediately
// following the call to fn. If there are results being returned on the stack,
// the caller should pass the argument frame type as stackArgsType so that
// reflectcall can execute appropriate write barriers during the copy.
//
// reflectcall expects regArgs.ReturnIsPtr to be populated indicating which
// registers on the return path will contain Go pointers. It will then store
// these pointers in regArgs.Ptrs such that they are visible to the GC.
//
// Package reflect passes a frame type. In package runtime, there is only
// one call that copies results back, in callbackWrap in syscall_windows.go, and it
// does NOT pass a frame type, meaning there are no write barriers invoked. See that
// call site for justification.
//
// Package reflect accesses this symbol through a linkname.
//
// Arguments passed through to reflectcall do not escape. The type is used
// only in a very limited callee of reflectcall, the stackArgs are copied, and
// regArgs is only used in the reflectcall frame.
//
//go:noescape
func reflectcall(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func procyield(cycles uint32)
type neverCallThisFunction struct{}
// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there is leftover state it may panic.
func goexit(neverCallThisFunction)
// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()
// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// The implementation may be a compiler intrinsic; there is not
// necessarily code implementing this on every platform.
//
// For example:
//
// func f(arg1, arg2, arg3 int) {
// pc := getcallerpc()
// sp := getcallersp()
// }
//
// These two lines find the PC and SP immediately following
// the call to f (where f will return).
//
// The call to getcallerpc and getcallersp must be done in the
// frame being asked about.
//
// The result of getcallersp is correct at the time of the return,
// but it may be invalidated by any subsequent call to a function
// that might relocate the stack in order to grow or shrink it.
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.
//go:noescape
func getcallerpc() uintptr
//go:noescape
func getcallersp() uintptr // implemented as an intrinsic on all platforms
// getclosureptr returns the pointer to the current closure.
// getclosureptr can only be used in an assignment statement
// at the entry of a function. Moreover, go:nosplit directive
// must be specified at the declaration of caller function,
// so that the function prolog does not clobber the closure register.
// for example:
//
// //go:nosplit
// func f(arg1, arg2, arg3 int) {
// dx := getclosureptr()
// }
//
// The compiler rewrites calls to this function into instructions that fetch the
// pointer from a well-known register (DX on x86 architecture, etc.) directly.
func getclosureptr() uintptr
//go:noescape
func asmcgocall(fn, arg unsafe.Pointer) int32
func morestack()
func morestack_noctxt()
func rt0_go()
// return0 is a stub used to return 0 from deferproc.
// It is called at the very end of deferproc to signal
// the calling Go function that it should not jump
// to deferreturn.
// in asm_*.s
func return0()
// in asm_*.s
// not called directly; definitions here supply type information for traceback.
// These must have the same signature (arg pointer map) as reflectcall.
func call16(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call32(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call64(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call128(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call256(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call512(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call1024(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call2048(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call4096(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call8192(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call16384(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call32768(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call65536(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call131072(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call262144(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call524288(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call1048576(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call2097152(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call4194304(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call8388608(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call16777216(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call33554432(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call67108864(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call134217728(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call268435456(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call536870912(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call1073741824(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func systemstack_switch()
// alignUp rounds n up to a multiple of a. a must be a power of 2.
func alignUp(n, a uintptr) uintptr {
return (n + a - 1) &^ (a - 1)
}
// alignDown rounds n down to a multiple of a. a must be a power of 2.
func alignDown(n, a uintptr) uintptr {
return n &^ (a - 1)
}
// divRoundUp returns ceil(n / a).
func divRoundUp(n, a uintptr) uintptr {
// a is generally a power of two. This will get inlined and
// the compiler will optimize the division.
return (n + a - 1) / a
}
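Aside: alignUp, alignDown, and divRoundUp are the standard power-of-two bit tricks; a few worked values (standalone copies of the definitions above):

```go
package main

import "fmt"

func alignUp(n, a uintptr) uintptr    { return (n + a - 1) &^ (a - 1) }
func alignDown(n, a uintptr) uintptr  { return n &^ (a - 1) }
func divRoundUp(n, a uintptr) uintptr { return (n + a - 1) / a }

func main() {
	fmt.Println(alignUp(13, 8))    // 16: next multiple of 8
	fmt.Println(alignUp(16, 8))    // 16: already aligned, unchanged
	fmt.Println(alignDown(13, 8))  // 8: previous multiple of 8
	fmt.Println(divRoundUp(13, 8)) // 2: ceil(13/8)
}
```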
// checkASM reports whether assembly runtime checks have passed.
func checkASM() bool
func memequal_varlen(a, b unsafe.Pointer) bool
// bool2int returns 0 if x is false or 1 if x is true.
func bool2int(x bool) int {
// Avoid branches. In the SSA compiler, this compiles to
// exactly what you would want it to.
return int(uint8(*(*uint8)(unsafe.Pointer(&x))))
}
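Aside on bool2int above: a Go bool is a single byte holding exactly 0 or 1, so reinterpreting that byte yields the integer without a branch. A standalone demo:

```go
package main

import (
	"fmt"
	"unsafe"
)

// bool2int reads the underlying byte of x (0 or 1) directly,
// compiling to a plain move instead of a conditional.
func bool2int(x bool) int {
	return int(uint8(*(*uint8)(unsafe.Pointer(&x))))
}

func main() {
	fmt.Println(bool2int(false), bool2int(true)) // 0 1
}
```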
// abort crashes the runtime in situations where even throw might not
// work. In general it should do something a debugger will recognize
// (e.g., an INT3 on x86). A crash in abort is recognized by the
// signal handler, which will attempt to tear down the runtime
// immediately.
func abort()
// Called from compiled code; declared for vet; do NOT call from Go.
func gcWriteBarrier1()
func gcWriteBarrier2()
func gcWriteBarrier3()
func gcWriteBarrier4()
func gcWriteBarrier5()
func gcWriteBarrier6()
func gcWriteBarrier7()
func gcWriteBarrier8()
func duffzero()
func duffcopy()
// Called from linker-generated .initarray; declared for go vet; do NOT call from Go.
func addmoduledata()
// Injected by the signal handler for panicking signals.
// Initializes any registers that have fixed meaning at calls but
// are scratch in bodies and calls sigpanic.
// On many platforms it just jumps to sigpanic.
func sigpanic0()
// intArgRegs is used by the various register assignment
// algorithm implementations in the runtime. These include:.
// - Finalizers (mfinal.go)
// - Windows callbacks (syscall_windows.go)
//
// Both are stripped-down versions of the algorithm since they
// only have to deal with a subset of cases (finalizers only
// take a pointer or interface argument, Go Windows callbacks
// don't support floating point).
//
// It should be modified with care and are generally only
// modified when testing this package.
//
// It should never be set higher than its internal/abi
// constant counterparts, because the system relies on a
// structure that is at least large enough to hold the
// registers the system supports.
//
// Protected by finlock.
var intArgRegs = abi.IntArgRegs
*/

internal/runtime/type.go (new file, 31 lines)
View File

@@ -0,0 +1,31 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Runtime type representation.
package runtime
import (
"github.com/goplus/llgo/internal/abi"
)
type _type = abi.Type
/*
type maptype = abi.MapType
type arraytype = abi.ArrayType
type chantype = abi.ChanType
type slicetype = abi.SliceType
type functype = abi.FuncType
type ptrtype = abi.PtrType
type name = abi.Name
type structtype = abi.StructType
*/

View File

@@ -23,13 +23,6 @@ import (
"github.com/goplus/llgo/internal/runtime/c" "github.com/goplus/llgo/internal/runtime/c"
) )
type _type = abi.Type
// isDirectIface reports whether t is stored directly in an interface value.
func isDirectIface(t *_type) bool {
return t.Kind_&abi.KindDirectIface != 0
}
type eface struct { type eface struct {
_type *_type _type *_type
data unsafe.Pointer data unsafe.Pointer

View File

@@ -16,113 +16,10 @@
 package runtime
 
-import (
-	"unsafe"
-
-	"github.com/goplus/llgo/internal/abi"
-)
-
 // Map represents a Go map.
 type Map = hmap
 
-type MapType = abi.MapType
-
 // MakeSmallMap creates a new small map.
 func MakeSmallMap() *Map {
 	return makemap_small()
 }
// MakeMap creates a new map.
func MakeMap(t *MapType, hint int, at *Map) *Map {
return makemap(t, hint, at)
}
// MapAssign finds a key in map m and returns the elem address to assign.
func MapAssign(t *MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
return mapassign(t, m, key)
}
func isReflexive(key *Type) bool {
return true // TODO(xsw): false for float/complex type
}
func hashOf(t *Type) func(key unsafe.Pointer, hash0 uintptr) uintptr {
if t.TFlag&abi.TFlagRegularMemory != 0 {
switch t.Size_ {
case 4:
return memhash32
case 8:
return memhash64
}
return func(key unsafe.Pointer, hash0 uintptr) uintptr {
return memhash(key, hash0, t.Size_)
}
}
switch t.Kind() {
case abi.Float32:
return f32hash
case abi.Float64:
return f64hash
case abi.Complex64:
return c64hash
case abi.Complex128:
return c128hash
case abi.String:
return strhash
case abi.Interface:
i := (*interfacetype)(unsafe.Pointer(t))
if len(i.Methods) == 0 {
return nilinterhash
}
return interhash
}
return func(key unsafe.Pointer, hash0 uintptr) uintptr {
return typehash(t, key, hash0)
}
}
// MapOf creates a new map type.
func MapOf(key, elem *Type) *MapType {
var flags uint32
keySlot, elemSlot := key, elem
ptrTy := Basic(abi.UnsafePointer)
if keySlot.Size_ > 128 {
keySlot = ptrTy
flags |= 1
}
if elemSlot.Size_ > 128 {
elemSlot = ptrTy
flags |= 2
}
if isReflexive(key) {
flags |= 4
}
tophashTy := ArrayOf(bucketCnt, Basic(abi.Uint8))
keysTy := ArrayOf(bucketCnt, keySlot)
elemsTy := ArrayOf(bucketCnt, elemSlot)
tophash := StructField("tophash", tophashTy, 0, "", false)
keys := StructField("keys", keysTy, tophashTy.Size_, "", false)
elems := StructField("elems", elemsTy, keys.Offset+keysTy.Size_, "", false)
overflow := StructField("overflow", ptrTy, elems.Offset+elemsTy.Size_, "", false)
bucket := Struct("", overflow.Offset+ptrTy.Size_, tophash, keys, elems, overflow)
ret := &abi.MapType{
Type: abi.Type{
Size_: unsafe.Sizeof(uintptr(0)),
Hash: uint32(abi.Map),
Kind_: uint8(abi.Map),
},
Key: key,
Elem: elem,
Bucket: bucket,
Hasher: hashOf(key),
KeySize: uint8(keySlot.Size_), // size of key slot
ValueSize: uint8(elemSlot.Size_), // size of elem slot
BucketSize: uint16(bucket.Size_), // size of bucket
Flags: flags,
}
return ret
}
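Aside on the deleted MapOf: for `map[int64]int64` it assembles a bucket of tophash bytes, key slots, elem slots, and an overflow pointer. Assuming the runtime's usual bucketCnt of 8, an equivalent plain-struct layout and its offsets:

```go
package main

import (
	"fmt"
	"unsafe"
)

// bucket mirrors the layout MapOf assembles for map[int64]int64:
// 8 tophash bytes, 8 key slots, 8 elem slots, one overflow pointer.
type bucket struct {
	tophash  [8]uint8
	keys     [8]int64
	elems    [8]int64
	overflow unsafe.Pointer
}

func main() {
	var b bucket
	fmt.Println(unsafe.Sizeof(b))            // 144 on 64-bit: 8+64+64+8
	fmt.Println(unsafe.Offsetof(b.keys))     // 8
	fmt.Println(unsafe.Offsetof(b.elems))    // 72
	fmt.Println(unsafe.Offsetof(b.overflow)) // 136
}
```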

View File

@@ -93,3 +93,10 @@ func stringTracef(fp c.FilePtr, format *c.Char, s String) {
}

// -----------------------------------------------------------------------------
// Zeroinit initializes memory to zero.
func Zeroinit(p unsafe.Pointer, size uintptr) unsafe.Pointer {
return c.Memset(p, 0, size)
}
// -----------------------------------------------------------------------------
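Aside: Zeroinit is a thin wrapper over `c.Memset(p, 0, size)`. A portable stand-in showing the contract (clear `size` bytes at `p`, return `p`):

```go
package main

import (
	"fmt"
	"unsafe"
)

// zeroinit mirrors runtime.Zeroinit's contract: clear size bytes at p
// and return p. (Stand-in for c.Memset(p, 0, size).)
func zeroinit(p unsafe.Pointer, size uintptr) unsafe.Pointer {
	b := unsafe.Slice((*byte)(p), size)
	for i := range b {
		b[i] = 0
	}
	return p
}

func main() {
	buf := [4]byte{1, 2, 3, 4}
	zeroinit(unsafe.Pointer(&buf), unsafe.Sizeof(buf))
	fmt.Println(buf) // [0 0 0 0]
}
```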

View File

@@ -24,8 +24,6 @@ import (
 // -----------------------------------------------------------------------------
 
-// type Slice = slice
-
 // Slice is the runtime representation of a slice.
 type Slice struct {
 	data unsafe.Pointer
View File

@@ -19,10 +19,10 @@ To run the demos in directory `_demo`, you need to set the `LLGO_LIB_PYTHON` env
 export LLGO_LIB_PYTHON=/foo/bar/python3.12
 ```
 
-For example, `/opt/homebrew/Frameworks/Python.framework/Versions/3.12/lib/libpython3.12.dylib` is a typical python lib location under macOS. So we should set it like this:
+For example, `/opt/homebrew/Frameworks/Python.framework/Versions/3.12/libpython3.12.dylib` is a typical python lib location under macOS. So we should set it like this:
 
 ```sh
-export LLGO_LIB_PYTHON=/opt/homebrew/Frameworks/Python.framework/Versions/3.12/lib/python3.12
+export LLGO_LIB_PYTHON=/opt/homebrew/Frameworks/Python.framework/Versions/3.12/python3.12
 ```
 
 Then you can run the demos in directory `_demo`:

View File

@@ -159,10 +159,6 @@ func (b *Builder) TypeName(t types.Type) (ret string, pub bool) {
return "_llgo_any", true return "_llgo_any", true
} }
return b.InterfaceName(t) return b.InterfaceName(t)
case *types.Map:
key, pub1 := b.TypeName(t.Key())
elem, pub2 := b.TypeName(t.Elem())
return fmt.Sprintf("map[%s]%s", key, elem), pub1 && pub2
} }
log.Panicf("todo: %T\n", t) log.Panicf("todo: %T\n", t)
return return

View File

@@ -66,8 +66,6 @@ func (b Builder) abiTypeOf(t types.Type) func() Expr {
 		return b.abiFuncOf(t)
 	case *types.Slice:
 		return b.abiSliceOf(t)
-	case *types.Map:
-		return b.abiMapOf(t)
 	case *types.Array:
 		return b.abiArrayOf(t)
 	}
@@ -246,14 +244,6 @@ func (b Builder) abiPointerOf(t *types.Pointer) func() Expr {
 	}
 }
 
-func (b Builder) abiMapOf(t *types.Map) func() Expr {
-	key := b.abiType(t.Key())
-	elem := b.abiType(t.Elem())
-	return func() Expr {
-		return b.Call(b.Pkg.rtFunc("MapOf"), key, elem)
-	}
-}
-
 func (b Builder) abiSliceOf(t *types.Slice) func() Expr {
 	elem := b.abiType(t.Elem())
 	return func() Expr {

View File

@@ -360,63 +360,8 @@ func (b Builder) MapUpdate(m, k, v Expr) {
 	if debugInstr {
 		log.Printf("MapUpdate %v[%v] = %v\n", m.impl, k.impl, v.impl)
 	}
-	t := m.Type
-	if t.kind != vkMap {
-		panic("TODO: not a map")
-	}
-	tabi := b.abiType(t.raw.Type)
-	prog := b.Prog
-	mptr := b.dupAlloca(m)
-	ptrimpl := b.InlineCall(b.Pkg.rtFunc("MapAssign"), tabi, mptr, k).impl
-	ptr := Expr{ptrimpl, prog.Pointer(v.Type)}
-	b.Store(ptr, v) // TODO(xsw): indirect store
-}
-
-// -----------------------------------------------------------------------------
-
-// The Range instruction yields an iterator over the domain and range
-// of X, which must be a string or map.
-//
-// Elements are accessed via Next.
-//
-// Type() returns an opaque and degenerate "rangeIter" type.
-//
-// Pos() returns the ast.RangeStmt.For.
-//
-// Example printed form:
-//
-//	t0 = range "hello":string
-func (b Builder) Range(x Expr) Expr {
-	switch x.kind {
-	case vkString:
-		return b.InlineCall(b.Pkg.rtFunc("NewStringIter"), x)
-	}
-	panic("todo")
-}
-
-// The Next instruction reads and advances the (map or string)
-// iterator Iter and returns a 3-tuple value (ok, k, v). If the
-// iterator is not exhausted, ok is true and k and v are the next
-// elements of the domain and range, respectively. Otherwise ok is
-// false and k and v are undefined.
-//
-// Components of the tuple are accessed using Extract.
-//
-// The IsString field distinguishes iterators over strings from those
-// over maps, as the Type() alone is insufficient: consider
-// map[int]rune.
-//
-// Type() returns a *types.Tuple for the triple (ok, k, v).
-// The types of k and/or v may be types.Invalid.
-//
-// Example printed form:
-//
-//	t1 = next t0
-func (b Builder) Next(iter Expr, isString bool) (ret Expr) {
-	if isString {
-		return b.InlineCall(b.Pkg.rtFunc("StringIterNext"), iter)
-	}
-	panic("todo")
+	// TODO(xsw)
+	// panic("todo")
 }
 
 // -----------------------------------------------------------------------------
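Aside on the deleted MapUpdate body: the lowering is "ask the runtime for the element slot, then store through it", i.e. `slot := MapAssign(t, m, &k); *slot = v`. A sketch of that two-step protocol with a hypothetical mapAssign stand-in (the real call takes an abi.MapType and the map header by pointer):

```go
package main

import (
	"fmt"
	"unsafe"
)

// mapAssign is a stand-in for runtime.MapAssign: given a map and a key,
// return the address of the element slot to store into.
func mapAssign(m map[int64]*int64, key int64) unsafe.Pointer {
	p := new(int64)
	m[key] = p
	return unsafe.Pointer(p)
}

func main() {
	m := map[int64]*int64{}
	// m[23] = 100, lowered as: slot := MapAssign(t, m, &key); *slot = v
	slot := (*int64)(mapAssign(m, 23))
	*slot = 100
	fmt.Println(*m[23]) // 100
}
```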

View File

@@ -669,6 +669,53 @@ func castPtr(b llvm.Builder, x llvm.Value, t llvm.Type) llvm.Value {
// -----------------------------------------------------------------------------
// The Range instruction yields an iterator over the domain and range
// of X, which must be a string or map.
//
// Elements are accessed via Next.
//
// Type() returns an opaque and degenerate "rangeIter" type.
//
// Pos() returns the ast.RangeStmt.For.
//
// Example printed form:
//
// t0 = range "hello":string
func (b Builder) Range(x Expr) Expr {
switch x.kind {
case vkString:
return b.InlineCall(b.Pkg.rtFunc("NewStringIter"), x)
}
panic("todo")
}
// The Next instruction reads and advances the (map or string)
// iterator Iter and returns a 3-tuple value (ok, k, v). If the
// iterator is not exhausted, ok is true and k and v are the next
// elements of the domain and range, respectively. Otherwise ok is
// false and k and v are undefined.
//
// Components of the tuple are accessed using Extract.
//
// The IsString field distinguishes iterators over strings from those
// over maps, as the Type() alone is insufficient: consider
// map[int]rune.
//
// Type() returns a *types.Tuple for the triple (ok, k, v).
// The types of k and/or v may be types.Invalid.
//
// Example printed form:
//
// t1 = next t0
func (b Builder) Next(iter Expr, isString bool) (ret Expr) {
if isString {
return b.InlineCall(b.Pkg.rtFunc("StringIterNext"), iter)
}
panic("todo")
}
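Aside: the Range/Next comments above describe the protocol the string case lowers to: an opaque iterator plus an (ok, k, v) step until exhaustion. Desugared into plain Go with hypothetical stand-ins for NewStringIter/StringIterNext:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

type stringIter struct {
	s   string
	pos int
}

func newStringIter(s string) *stringIter { return &stringIter{s: s} }

// next returns (ok, byteIndex, rune); ok is false once exhausted.
func next(it *stringIter) (bool, int, rune) {
	if it.pos >= len(it.s) {
		return false, 0, 0
	}
	r, size := utf8.DecodeRuneInString(it.s[it.pos:])
	k := it.pos
	it.pos += size
	return true, k, r
}

func main() {
	// for k, v := range "héllo" compiles to: t0 = range s; t1 = next t0; ...
	it := newStringIter("héllo")
	for {
		ok, k, v := next(it)
		if !ok {
			break
		}
		fmt.Printf("%d %q\n", k, v)
	}
}
```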
// -----------------------------------------------------------------------------
// The MakeClosure instruction yields a closure value whose code is
// Fn and whose free variables' values are supplied by Bindings.
//

View File

@@ -107,15 +107,16 @@ func aggregateInit(b llvm.Builder, ptr llvm.Value, tll llvm.Type, flds ...llvm.V
 	}
 }
 
-func (b Builder) dupAlloca(v Expr) Expr {
+/*
+func (b Builder) dupMalloc(v Expr) Expr {
 	prog := b.Prog
 	n := prog.SizeOf(v.Type)
 	tptr := prog.Pointer(v.Type)
-	ptr := b.Alloca(prog.Val(uintptr(n))).impl
-	ret := Expr{ptr, tptr}
-	b.Store(ret, v)
-	return ret
+	ptr := b.malloc(prog.Val(uintptr(n))).impl
+	b.Store(Expr{ptr, tptr}, v)
+	return Expr{ptr, tptr}
 }
+*/
 
 // -----------------------------------------------------------------------------

View File

@@ -27,17 +27,6 @@ import (
"github.com/goplus/llvm" "github.com/goplus/llvm"
) )
func TestMapUpdate(t *testing.T) {
var b Builder
var m = Expr{Type: &aType{}}
defer func() {
if e := recover(); e == nil {
t.Log("MapUpdate: no error?")
}
}()
b.MapUpdate(m, m, m)
}
func TestEndDefer(t *testing.T) { func TestEndDefer(t *testing.T) {
prog := NewProgram(nil) prog := NewProgram(nil)
pkg := prog.NewPackage("foo", "foo") pkg := prog.NewPackage("foo", "foo")