From 66a537ad29cbc3b5f149f5889ecb6b9832e4bd6d Mon Sep 17 00:00:00 2001
From: Haolan
Date: Fri, 19 Sep 2025 11:27:33 +0800
Subject: [PATCH] fix: add gc dummy mutex

---
 runtime/internal/lib/runtime/runtime2.go   |   6 -
 runtime/internal/lib/runtime/runtime_gc.go |  10 +-
 .../lib/runtime/runtime_gc_baremetal.go    |  20 +-
 runtime/internal/runtime/tinygogc/gc.go    | 177 ++++++++++++++++++
 .../tinygogc/{gc_llgo.go => gc_link.go}    |   3 +
 .../internal/runtime/tinygogc/gc_tinygo.go |  32 +++-
 runtime/internal/runtime/tinygogc/mutex.go |   7 +
 targets/esp32.app.elf.ld                   |   1 +
 8 files changed, 240 insertions(+), 16 deletions(-)
 rename runtime/internal/runtime/tinygogc/{gc_llgo.go => gc_link.go} (92%)
 create mode 100644 runtime/internal/runtime/tinygogc/mutex.go

diff --git a/runtime/internal/lib/runtime/runtime2.go b/runtime/internal/lib/runtime/runtime2.go
index db2db492..0fc08064 100644
--- a/runtime/internal/lib/runtime/runtime2.go
+++ b/runtime/internal/lib/runtime/runtime2.go
@@ -4,8 +4,6 @@
 
 package runtime
 
-import "runtime"
-
 // Layout of in-memory per-function information prepared by linker
 // See https://golang.org/s/go12symtab.
 // Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
@@ -30,10 +28,6 @@ func StopTrace() {
 	panic("todo: runtime.StopTrace")
 }
 
-func ReadMemStats(m *runtime.MemStats) {
-	panic("todo: runtime.ReadMemStats")
-}
-
 func SetMutexProfileFraction(rate int) int {
 	panic("todo: runtime.SetMutexProfileFraction")
 }
diff --git a/runtime/internal/lib/runtime/runtime_gc.go b/runtime/internal/lib/runtime/runtime_gc.go
index a6d76401..38de11a0 100644
--- a/runtime/internal/lib/runtime/runtime_gc.go
+++ b/runtime/internal/lib/runtime/runtime_gc.go
@@ -2,7 +2,15 @@
 
 package runtime
 
-import "github.com/goplus/llgo/runtime/internal/clite/bdwgc"
+import (
+	"runtime"
+
+	"github.com/goplus/llgo/runtime/internal/clite/bdwgc"
+)
+
+func ReadMemStats(m *runtime.MemStats) {
+	panic("todo: runtime.ReadMemStats")
+}
 
 func GC() {
 	bdwgc.Gcollect()
diff --git a/runtime/internal/lib/runtime/runtime_gc_baremetal.go b/runtime/internal/lib/runtime/runtime_gc_baremetal.go
index b6192a44..43ab5573 100644
--- a/runtime/internal/lib/runtime/runtime_gc_baremetal.go
+++ b/runtime/internal/lib/runtime/runtime_gc_baremetal.go
@@ -2,7 +2,25 @@
 
 package runtime
 
-import "github.com/goplus/llgo/runtime/internal/runtime/tinygogc"
+import (
+	"runtime"
+
+	"github.com/goplus/llgo/runtime/internal/runtime/tinygogc"
+)
+
+func ReadMemStats(m *runtime.MemStats) {
+	stats := tinygogc.ReadGCStats()
+	m.StackInuse = stats.StackInuse
+	m.StackSys = stats.StackSys
+	m.HeapSys = stats.HeapSys
+	m.GCSys = stats.GCSys
+	m.TotalAlloc = stats.TotalAlloc
+	m.Mallocs = stats.Mallocs
+	m.Frees = stats.Frees
+	m.Sys = stats.Sys
+	m.HeapAlloc = stats.HeapAlloc
+	m.Alloc = stats.Alloc
+}
 
 func GC() {
 	tinygogc.GC()
diff --git a/runtime/internal/runtime/tinygogc/gc.go b/runtime/internal/runtime/tinygogc/gc.go
index bfe4149e..5bb673b7 100644
--- a/runtime/internal/runtime/tinygogc/gc.go
+++ b/runtime/internal/runtime/tinygogc/gc.go
@@ -20,3 +20,180 @@ func __wrap_calloc(size uintptr) unsafe.Pointer {
 func __wrap_realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
 	return Realloc(ptr, size)
 }
+
+type GCStats struct {
+	// General statistics.
+
+	// Alloc is bytes of allocated heap objects.
+	//
+	// This is the same as HeapAlloc (see below).
+	Alloc uint64
+
+	// TotalAlloc is cumulative bytes allocated for heap objects.
+	//
+	// TotalAlloc increases as heap objects are allocated, but
+	// unlike Alloc and HeapAlloc, it does not decrease when
+	// objects are freed.
+	TotalAlloc uint64
+
+	// Sys is the total bytes of memory obtained from the OS.
+	//
+	// Sys is the sum of the XSys fields below. Sys measures the
+	// virtual address space reserved by the Go runtime for the
+	// heap, stacks, and other internal data structures. It's
+	// likely that not all of the virtual address space is backed
+	// by physical memory at any given moment, though in general
+	// it all was at some point.
+	Sys uint64
+
+	// Mallocs is the cumulative count of heap objects allocated.
+	// The number of live objects is Mallocs - Frees.
+	Mallocs uint64
+
+	// Frees is the cumulative count of heap objects freed.
+	Frees uint64
+
+	// Heap memory statistics.
+	//
+	// Interpreting the heap statistics requires some knowledge of
+	// how Go organizes memory. Go divides the virtual address
+	// space of the heap into "spans", which are contiguous
+	// regions of memory 8K or larger. A span may be in one of
+	// three states:
+	//
+	// An "idle" span contains no objects or other data. The
+	// physical memory backing an idle span can be released back
+	// to the OS (but the virtual address space never is), or it
+	// can be converted into an "in use" or "stack" span.
+	//
+	// An "in use" span contains at least one heap object and may
+	// have free space available to allocate more heap objects.
+	//
+	// A "stack" span is used for goroutine stacks. Stack spans
+	// are not considered part of the heap. A span can change
+	// between heap and stack memory; it is never used for both
+	// simultaneously.
+
+	// HeapAlloc is bytes of allocated heap objects.
+	//
+	// "Allocated" heap objects include all reachable objects, as
+	// well as unreachable objects that the garbage collector has
+	// not yet freed. Specifically, HeapAlloc increases as heap
+	// objects are allocated and decreases as the heap is swept
+	// and unreachable objects are freed. Sweeping occurs
+	// incrementally between GC cycles, so these two processes
+	// occur simultaneously, and as a result HeapAlloc tends to
+	// change smoothly (in contrast with the sawtooth that is
+	// typical of stop-the-world garbage collectors).
+	HeapAlloc uint64
+
+	// HeapSys is bytes of heap memory obtained from the OS.
+	//
+	// HeapSys measures the amount of virtual address space
+	// reserved for the heap. This includes virtual address space
+	// that has been reserved but not yet used, which consumes no
+	// physical memory, but tends to be small, as well as virtual
+	// address space for which the physical memory has been
+	// returned to the OS after it became unused (see HeapReleased
+	// for a measure of the latter).
+	//
+	// HeapSys estimates the largest size the heap has had.
+	HeapSys uint64
+
+	// HeapIdle is bytes in idle (unused) spans.
+	//
+	// Idle spans have no objects in them. These spans could be
+	// (and may already have been) returned to the OS, or they can
+	// be reused for heap allocations, or they can be reused as
+	// stack memory.
+	//
+	// HeapIdle minus HeapReleased estimates the amount of memory
+	// that could be returned to the OS, but is being retained by
+	// the runtime so it can grow the heap without requesting more
+	// memory from the OS. If this difference is significantly
+	// larger than the heap size, it indicates there was a recent
+	// transient spike in live heap size.
+	HeapIdle uint64
+
+	// HeapInuse is bytes in in-use spans.
+	//
+	// In-use spans have at least one object in them. These spans
+	// can only be used for other objects of roughly the same
+	// size.
+	//
+	// HeapInuse minus HeapAlloc estimates the amount of memory
+	// that has been dedicated to particular size classes, but is
+	// not currently being used. This is an upper bound on
+	// fragmentation, but in general this memory can be reused
+	// efficiently.
+	HeapInuse uint64
+
+	// Stack memory statistics.
+	//
+	// Stacks are not considered part of the heap, but the runtime
+	// can reuse a span of heap memory for stack memory, and
+	// vice-versa.
+
+	// StackInuse is bytes in stack spans.
+	//
+	// In-use stack spans have at least one stack in them. These
+	// spans can only be used for other stacks of the same size.
+	//
+	// There is no StackIdle because unused stack spans are
+	// returned to the heap (and hence counted toward HeapIdle).
+	StackInuse uint64
+
+	// StackSys is bytes of stack memory obtained from the OS.
+	//
+	// StackSys is StackInuse, plus any memory obtained directly
+	// from the OS for OS thread stacks.
+	//
+	// In non-cgo programs this metric is currently equal to StackInuse
+	// (but this should not be relied upon, and the value may change in
+	// the future).
+	//
+	// In cgo programs this metric includes OS thread stacks allocated
+	// directly from the OS. Currently, this only accounts for one stack in
+	// c-shared and c-archive build modes and other sources of stacks from
+	// the OS (notably, any allocated by C code) are not currently measured.
+	// Note this too may change in the future.
+	StackSys uint64
+
+	// GCSys is bytes of memory in garbage collection metadata.
+	GCSys uint64
+}
+
+func ReadGCStats() GCStats {
+	var heapInuse, heapIdle uint64
+
+	lock(&gcMutex)
+
+	for block := uintptr(0); block < endBlock; block++ {
+		bstate := gcStateOf(block)
+		if bstate == blockStateFree {
+			heapIdle += uint64(bytesPerBlock)
+		} else {
+			heapInuse += uint64(bytesPerBlock)
+		}
+	}
+
+	stackEnd := uintptr(unsafe.Pointer(&_stackEnd))
+	stackSys := stackTop - stackEnd
+
+	stats := GCStats{
+		StackInuse: uint64(stackTop - uintptr(getsp())),
+		StackSys:   uint64(stackSys),
+		HeapSys:    heapInuse + heapIdle,
+		GCSys:      uint64(heapEnd - uintptr(metadataStart)),
+		TotalAlloc: gcTotalAlloc,
+		Mallocs:    gcMallocs,
+		Frees:      gcFrees,
+		Sys:        uint64(heapEnd - heapStart),
+		HeapAlloc:  (gcTotalBlocks - gcFreedBlocks) * uint64(bytesPerBlock),
+		Alloc:      (gcTotalBlocks - gcFreedBlocks) * uint64(bytesPerBlock),
+	}
+
+	unlock(&gcMutex)
+
+	return stats
+}
diff --git a/runtime/internal/runtime/tinygogc/gc_llgo.go b/runtime/internal/runtime/tinygogc/gc_link.go
index 5f2d73e8..31e025c0 100644
--- a/runtime/internal/runtime/tinygogc/gc_llgo.go
+++ b/runtime/internal/runtime/tinygogc/gc_link.go
@@ -28,6 +28,9 @@ var _heapEnd [0]byte
 
 //go:linkname _stackStart _stack_top
 var _stackStart [0]byte
 
+//go:linkname _stackEnd _stack_end
+var _stackEnd [0]byte
+
 //go:linkname _globals_start _globals_start
 var _globals_start [0]byte
diff --git a/runtime/internal/runtime/tinygogc/gc_tinygo.go b/runtime/internal/runtime/tinygogc/gc_tinygo.go
index 1081dafb..5aa476ae 100644
--- a/runtime/internal/runtime/tinygogc/gc_tinygo.go
+++ b/runtime/internal/runtime/tinygogc/gc_tinygo.go
@@ -50,7 +50,6 @@ var (
 	stackTop      uintptr        // the top of stack
 	endBlock      uintptr        // GC end block index
 	metadataStart unsafe.Pointer // start address of GC metadata
-	isGCInit      bool
 
 	nextAlloc uintptr // the next block that should be tried by the allocator
 
 	gcTotalAlloc uint64 // total number of bytes allocated
@@ -65,6 +64,9 @@ var (
 
 	// zeroSizedAlloc is just a sentinel that gets returned when allocating 0 bytes.
 	zeroSizedAlloc uint8
+
+	gcMutex  mutex // gcMutex protects GC related variables
+	isGCInit bool  // isGCInit indicates GC initialization state
 )
 
 // Some globals + constants for the entire GC.
@@ -220,16 +222,22 @@ func isOnHeap(ptr uintptr) bool {
 	return ptr >= heapStart && ptr < uintptr(metadataStart)
 }
 
+func isPointer(ptr uintptr) bool {
+	// TODO: implement precise GC
+	return isOnHeap(ptr)
+}
+
 // alloc tries to find some free space on the heap, possibly doing a garbage
 // collection cycle if needed. If no space is free, it panics.
 //
 //go:noinline
 func Alloc(size uintptr) unsafe.Pointer {
-	lazyInit()
-
 	if size == 0 {
 		return unsafe.Pointer(&zeroSizedAlloc)
 	}
+	lock(&gcMutex)
+	lazyInit()
+
 	gcTotalAlloc += uint64(size)
 	gcMallocs++
 
@@ -250,7 +258,7 @@
 		// could be found. Run a garbage collection cycle to reclaim
 		// free memory and try again.
 		heapScanCount = 2
-		freeBytes := GC()
+		freeBytes := gc()
 		heapSize := uintptr(metadataStart) - heapStart
 		if freeBytes < heapSize/3 {
 			// Ensure there is at least 33% headroom.
@@ -308,7 +316,7 @@
 	for i := thisAlloc + 1; i != nextAlloc; i++ {
 		gcSetState(i, blockStateTail)
 	}
-
+	unlock(&gcMutex)
 	// Return a pointer to this allocation.
 	return memset(gcPointerOf(thisAlloc), 0, size)
 }
@@ -316,10 +324,12 @@
 }
 
 func Realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
-	lazyInit()
 	if ptr == nil {
 		return Alloc(size)
 	}
+	lock(&gcMutex)
+	lazyInit()
+	unlock(&gcMutex)
 
 	ptrAddress := uintptr(ptr)
 	endOfTailAddress := gcAddressOf(gcFindNext(blockFromAddr(ptrAddress)))
@@ -342,10 +352,16 @@ func free(ptr unsafe.Pointer) {
 	// TODO: free blocks on request, when the compiler knows they're unused.
 }
 
+func GC() {
+	lock(&gcMutex)
+	gc()
+	unlock(&gcMutex)
+}
+
 // runGC performs a garbage collection cycle. It is the internal implementation
 // of the runtime.GC() function. The difference is that it returns the number of
 // free bytes in the heap after the GC is finished.
-func GC() (freeBytes uintptr) {
+func gc() (freeBytes uintptr) {
 	lazyInit()
 
 	if gcDebug {
@@ -403,7 +419,7 @@ func startMark(root uintptr) {
 
 			// Load the word.
 			word := *(*uintptr)(unsafe.Pointer(addr))
-			if !isOnHeap(word) {
+			if !isPointer(word) {
 				// Not a heap pointer.
 				continue
 			}
diff --git a/runtime/internal/runtime/tinygogc/mutex.go b/runtime/internal/runtime/tinygogc/mutex.go
new file mode 100644
index 00000000..76d6fbbb
--- /dev/null
+++ b/runtime/internal/runtime/tinygogc/mutex.go
@@ -0,0 +1,7 @@
+package tinygogc
+
+type mutex struct{}
+
+func lock(m *mutex) {}
+
+func unlock(m *mutex) {}
diff --git a/targets/esp32.app.elf.ld b/targets/esp32.app.elf.ld
index 94b90611..7bd5e38d 100755
--- a/targets/esp32.app.elf.ld
+++ b/targets/esp32.app.elf.ld
@@ -27,6 +27,7 @@ SECTIONS
   .stack (NOLOAD) :
   {
+    _stack_end = .;
     . = ALIGN(16);
     . += 16K;
     __stack = .;
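
With this patch applied, runtime.ReadMemStats stops panicking on baremetal targets and reports the tinygogc numbers instead. A minimal caller sketch (not part of the patch; it assumes a target where fmt is usable):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	var m runtime.MemStats
    	runtime.ReadMemStats(&m)

    	// Mallocs and Frees are cumulative, so their difference is the
    	// number of live heap objects.
    	live := m.Mallocs - m.Frees

    	fmt.Printf("heap %d/%d bytes, %d live objects\n", m.HeapAlloc, m.HeapSys, live)
    	fmt.Printf("stack %d/%d bytes, gc metadata %d bytes\n", m.StackInuse, m.StackSys, m.GCSys)
    }

ReadGCStats takes gcMutex itself, so callers need no extra synchronization around the snapshot.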
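
The stack figures come from linker symbols rather than from span accounting. The new _stack_end symbol marks the low end of the 16K .stack region in esp32.app.elf.ld, the existing _stack_top symbol its high end, and the stack grows downward on the ESP32, so ReadGCStats reduces to this arithmetic (restated here for clarity, under that layout assumption):

    // _stack_end  low address; the symbol this patch adds to .stack
    // getsp()     current stack pointer, somewhere in between
    // stackTop    high address, loaded from the existing _stack_top symbol
    stackSys := stackTop - uintptr(unsafe.Pointer(&_stackEnd)) // whole reserved region
    stackInuse := stackTop - uintptr(getsp())                  // portion currently in use

StackSys minus StackInuse is then the headroom left before the stack would run past _stack_end.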
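
The mutex itself is intentionally a no-op: the empty lock/unlock bodies suggest current baremetal targets are treated as single-threaded, and the point of the patch is to bracket every GC entry point (Alloc, Realloc, GC, ReadGCStats) so a real lock can be dropped in later without touching callers. One possible shape for such a lock on a multi-core port, sketched as an illustration only (availability of sync/atomic in this runtime package is an assumption):

    package tinygogc

    import "sync/atomic"

    // mutex as a simple spinlock; plausible for the short GC critical
    // sections if several cores ever share this heap. Not part of the patch.
    type mutex struct {
    	state uint32
    }

    func lock(m *mutex) {
    	for !atomic.CompareAndSwapUint32(&m.state, 0, 1) {
    		// Busy-wait; a real port might pause or yield the core here.
    	}
    }

    func unlock(m *mutex) {
    	atomic.StoreUint32(&m.state, 0)
    }

Note the GC()/gc() split in gc_tinygo.go: the allocation slow path calls gc() while already holding gcMutex, so any real lock must stay out of gc() itself; only the exported wrapper acquires it.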