feat: implement baremetal GC

fix: pthread gc
fix: xiao-esp32c3 symbol
refactor: use clite memset instead of linking
fix: stack top symbol
@@ -753,7 +753,6 @@ func compileExtraFiles(ctx *context, verbose bool) ([]string, error) {
 }
 
 func linkMainPkg(ctx *context, pkg *packages.Package, pkgs []*aPackage, global llssa.Package, outputPath string, verbose bool) error {
-
 	needRuntime := false
 	needPyInit := false
 	pkgsMap := make(map[*packages.Package]*aPackage, len(pkgs))
@@ -998,6 +997,10 @@ define weak void @runtime.init() {
 ret void
 }
 
+define weak void @initGC() {
+ret void
+}
+
 ; TODO(lijie): workaround for syscall patch
 define weak void @"syscall.init"() {
 ret void
@@ -1009,6 +1012,7 @@ define weak void @"syscall.init"() {
 _llgo_0:
 store i32 %%0, ptr @__llgo_argc, align 4
 store ptr %%1, ptr @__llgo_argv, align 8
+call void @initGC()
 %s
 %s
 %s
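Note on the wiring above: the generated entry code declares `@initGC` as a weak no-op and calls it right after `argc`/`argv` are stored, before `runtime.init`. Any GC backend that exports a strong `initGC` symbol (as the bdw-gc binding in the next hunk does) replaces the stub at link time, so programs built without a GC still link. A minimal sketch of the strong-symbol side, assuming llgo's `//export` emits a non-weak definition; `mygc` and its contents are illustrative, not part of this commit:

```go
// Hypothetical GC backend package.
package mygc

// initGC overrides the weak `define weak void @initGC()` stub emitted by the
// compiler. It runs before runtime.init and before user code, so it must not
// allocate.
//
//export initGC
func initGC() {
	// set up heap metadata here
}
```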
@@ -26,6 +26,11 @@ const (
 	LLGoPackage = "link: $(pkg-config --libs bdw-gc); -lgc"
 )
 
+//export initGC
+func initGC() {
+	Init()
+}
+
 // -----------------------------------------------------------------------------
 
 //go:linkname Init C.GC_init
@@ -1,5 +1,4 @@
-//go:build !nogc
-// +build !nogc
+//go:build !nogc && !baremetal
 
 /*
  * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.

@@ -1,5 +1,4 @@
-//go:build nogc
-// +build nogc
+//go:build nogc || baremetal
 
 /*
  * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.

@@ -1,4 +1,4 @@
-//go:build !nogc
+//go:build !nogc && !baremetal
 
 package runtime
 
runtime/internal/lib/runtime/runtime_gc_baremetal.go (new file, 9 lines)
@@ -0,0 +1,9 @@
+//go:build !nogc && baremetal
+
+package runtime
+
+import "github.com/goplus/llgo/runtime/internal/runtime/tinygogc"
+
+func GC() {
+	tinygogc.GC()
+}
runtime/internal/runtime/gc_tinygo.go (new file, 537 lines)
@@ -0,0 +1,537 @@
+//go:build baremetal
+
+/*
+ * Copyright (c) 2018-2025 The TinyGo Authors. All rights reserved.
+ * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package runtime
+
+import (
+	"unsafe"
+	_ "unsafe"
+
+	c "github.com/goplus/llgo/runtime/internal/clite"
+	"github.com/goplus/llgo/runtime/internal/runtime/tinygogc/memory"
+)
+
+const gcDebug = false
+const needsStaticHeap = true
+
+// Some globals + constants for the entire GC.
+
+const (
+	wordsPerBlock      = 4 // number of pointers in an allocated block
+	bytesPerBlock      = wordsPerBlock * unsafe.Sizeof(memory.HeapStart)
+	stateBits          = 2 // how many bits a block state takes (see blockState type)
+	blocksPerStateByte = 8 / stateBits
+	markStackSize      = 8 * unsafe.Sizeof((*int)(nil)) // number of to-be-marked blocks to queue before forcing a rescan
+)
+
+// Provide some abstraction over heap blocks.
+
+// blockState stores the four states in which a block can be. It is two bits in
+// size.
+
+const (
+	blockStateFree uint8 = 0 // 00
+	blockStateHead uint8 = 1 // 01
+	blockStateTail uint8 = 2 // 10
+	blockStateMark uint8 = 3 // 11
+	blockStateMask uint8 = 3 // 11
+)
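A worked example of the geometry above, assuming a 32-bit target (4-byte `uintptr`, so `bytesPerBlock` is 16 and one metadata byte covers 4 blocks = 64 heap bytes; the numbers double on 64-bit):

```go
package main

import "fmt"

const (
	wordsPerBlock      = 4
	sizeofUintptr      = 4                             // assumption: 32-bit target (riscv32/xtensa)
	bytesPerBlock      = wordsPerBlock * sizeofUintptr // 16
	stateBits          = 2
	blocksPerStateByte = 8 / stateBits // 4 two-bit states per metadata byte
)

func main() {
	heapStart := uint32(0x3FFB0000) // hypothetical HeapStart
	addr := heapStart + 75          // a pointer somewhere into the heap

	block := (addr - heapStart) / bytesPerBlock // blockFromAddr: 75/16 = block 4
	stateByte := block / blocksPerStateByte     // metadata byte index: 1
	shift := (block % blocksPerStateByte) * stateBits

	fmt.Println(block, stateByte, shift) // 4 1 0
}
```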
+
+//go:linkname getsp llgo.stackSave
+func getsp() unsafe.Pointer
+
+func printlnAndPanic(msg string) {
+	println(msg)
+	panic("")
+}
+
+var (
+	nextAlloc     uintptr // the next block that should be tried by the allocator
+	endBlock      uintptr // the block just past the end of the available space
+	gcTotalAlloc  uint64  // total number of bytes allocated
+	gcTotalBlocks uint64  // total number of allocated blocks
+	gcMallocs     uint64  // total number of allocations
+	gcFrees       uint64  // total number of objects freed
+	gcFreedBlocks uint64  // total number of freed blocks
+
+	// markStackOverflow is set when the GC scans too deep while marking.
+	// After it is set, all marked allocations must be re-scanned.
+	markStackOverflow bool
+
+	// zeroSizedAlloc is just a sentinel that gets returned when allocating 0 bytes.
+	zeroSizedAlloc uint8
+)
+
+// blockState stores the four states in which a block can be. It is two bits in
+// size.
+type blockState uint8
+
+// The value of a state byte where every block is a 'tail' block.
+const blockStateByteAllTails = 0 |
+	uint8(blockStateTail<<(stateBits*3)) |
+	uint8(blockStateTail<<(stateBits*2)) |
+	uint8(blockStateTail<<(stateBits*1)) |
+	uint8(blockStateTail<<(stateBits*0))
+
+// blockFromAddr returns a block given an address somewhere in the heap (which
+// might not be heap-aligned).
+func blockFromAddr(addr uintptr) uintptr {
+	if addr < memory.HeapStart || addr >= uintptr(memory.MetadataStart) {
+		printlnAndPanic("gc: trying to get block from invalid address")
+	}
+	return (addr - memory.HeapStart) / bytesPerBlock
+}
+
+// gcPointerOf returns a pointer to the start of the allocated object.
+func gcPointerOf(blockAddr uintptr) unsafe.Pointer {
+	return unsafe.Pointer(gcAddressOf(blockAddr))
+}
+
+// gcAddressOf returns the address of the start of the allocated object.
+func gcAddressOf(blockAddr uintptr) uintptr {
+	addr := memory.HeapStart + blockAddr*bytesPerBlock
+	if addr > uintptr(memory.MetadataStart) {
+		printlnAndPanic("gc: block pointing inside metadata")
+	}
+	return addr
+}
+
+// gcFindHead returns the head (first block) of an object, assuming the block
+// points to an allocated object. It returns the same block if this block
+// already points to the head.
+func gcFindHead(blockAddr uintptr) uintptr {
+	for {
+		// Optimization: check whether the current block state byte (which
+		// contains the state of multiple blocks) is composed entirely of tail
+		// blocks. If so, we can skip back to the last block in the previous
+		// state byte.
+		// This optimization speeds up findHead for pointers that point into a
+		// large allocation.
+		stateByte := gcStateByteOf(blockAddr)
+		if stateByte == blockStateByteAllTails {
+			blockAddr -= (blockAddr % blocksPerStateByte) + 1
+			continue
+		}
+
+		// Check whether we've found a non-tail block, which means we found the
+		// head.
+		state := gcStateFromByte(blockAddr, stateByte)
+		if state != blockStateTail {
+			break
+		}
+		blockAddr--
+	}
+	if gcStateOf(blockAddr) != blockStateHead && gcStateOf(blockAddr) != blockStateMark {
+		printlnAndPanic("gc: found tail without head")
+	}
+	return blockAddr
+}
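The all-tails fast path above is worth seeing in isolation: when a whole state byte reads 0xAA (four 'tail' states, `10|10|10|10`), `gcFindHead` jumps straight to the last block covered by the previous byte instead of stepping one block at a time. A standalone simulation, using a plain byte slice in place of the real metadata region:

```go
// Simulation of gcFindHead's all-tails skip over packed two-bit states.
package main

import "fmt"

const (
	stateBits          = 2
	blocksPerStateByte = 4
	free, head, tail   = 0, 1, 2
)

var meta []byte // two-bit states, four per byte

func stateOf(block int) uint8 {
	return meta[block/blocksPerStateByte] >> ((block % blocksPerStateByte) * stateBits) & 3
}

func findHead(block int) int {
	for {
		if meta[block/blocksPerStateByte] == 0xAA { // all four states are 'tail'
			block -= (block % blocksPerStateByte) + 1 // skip to previous state byte
			continue
		}
		if stateOf(block) != tail {
			break
		}
		block--
	}
	return block
}

func main() {
	// One 9-block object: head at block 0, tails at blocks 1..8.
	meta = []byte{0xA9, 0xAA, 0x02} // 10|10|10|01, 10|10|10|10, 00|00|00|10
	fmt.Println(findHead(8))        // 0: one byte-sized skip, then a few single steps
}
```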
+
+// gcFindNext returns the first block just past the end of the tail. This may
+// or may not be the head of an object.
+func gcFindNext(blockAddr uintptr) uintptr {
+	if gcStateOf(blockAddr) == blockStateHead || gcStateOf(blockAddr) == blockStateMark {
+		blockAddr++
+	}
+	for gcAddressOf(blockAddr) < uintptr(memory.MetadataStart) && gcStateOf(blockAddr) == blockStateTail {
+		blockAddr++
+	}
+	return blockAddr
+}
+
+func gcStateByteOf(blockAddr uintptr) byte {
+	return *(*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+}
+
+// gcStateFromByte returns the block state given a state byte. The state byte
+// must have been obtained using gcStateByteOf(), otherwise the result is
+// incorrect.
+func gcStateFromByte(blockAddr uintptr, stateByte byte) uint8 {
+	return uint8(stateByte>>((blockAddr%blocksPerStateByte)*stateBits)) & blockStateMask
+}
+
+// gcStateOf returns the current block state.
+func gcStateOf(blockAddr uintptr) uint8 {
+	return gcStateFromByte(blockAddr, gcStateByteOf(blockAddr))
+}
+
+// gcSetState sets the current block to the given state, which must contain
+// more bits than the current state. Allowed transitions: from free to any
+// state and from head to mark.
+func gcSetState(blockAddr uintptr, newState uint8) {
+	stateBytePtr := (*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+	*stateBytePtr |= uint8(newState << ((blockAddr % blocksPerStateByte) * stateBits))
+	if gcStateOf(blockAddr) != newState {
+		printlnAndPanic("gc: setState() was not successful")
+	}
+}
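`gcSetState` only ORs bits in, with no clear step. That is safe because every allowed transition adds bits: free (00) can become head (01), tail (10), or mark (11), and head (01) can become mark (11). A quick check of that invariant:

```go
// Verifies the allowed transitions are pure bit-additions, so
// `*stateBytePtr |= newState << shift` never needs a clear first.
package main

import "fmt"

func main() {
	const free, head, tail, mark = 0, 1, 2, 3
	for _, t := range [][2]uint8{{free, head}, {free, tail}, {free, mark}, {head, mark}} {
		from, to := t[0], t[1]
		fmt.Println(from|to == to) // true: OR-ing from the old state reaches the new one
	}
}
```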
+
+// gcMarkFree sets the block state to free, no matter what state it was in before.
+func gcMarkFree(blockAddr uintptr) {
+	stateBytePtr := (*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+	*stateBytePtr &^= uint8(blockStateMask << ((blockAddr % blocksPerStateByte) * stateBits))
+	if gcStateOf(blockAddr) != blockStateFree {
+		printlnAndPanic("gc: markFree() was not successful")
+	}
+	*(*[wordsPerBlock]uintptr)(unsafe.Pointer(gcAddressOf(blockAddr))) = [wordsPerBlock]uintptr{}
+}
+
+// gcUnmark changes the state of the block from mark to head. It must be marked
+// before calling this function.
+func gcUnmark(blockAddr uintptr) {
+	if gcStateOf(blockAddr) != blockStateMark {
+		printlnAndPanic("gc: unmark() on a block that is not marked")
+	}
+	clearMask := blockStateMask ^ blockStateHead // the bits to clear from the state
+	stateBytePtr := (*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+	*stateBytePtr &^= uint8(clearMask << ((blockAddr % blocksPerStateByte) * stateBits))
+	if gcStateOf(blockAddr) != blockStateHead {
+		printlnAndPanic("gc: unmark() was not successful")
+	}
+}
+
+func isOnHeap(ptr uintptr) bool {
+	return ptr >= memory.HeapStart && ptr < uintptr(memory.MetadataStart)
+}
+
+// alloc tries to find some free space on the heap, possibly doing a garbage
+// collection cycle if needed. If no space is free, it panics.
+//
+//go:noinline
+func alloc(size uintptr) unsafe.Pointer {
+	if size == 0 {
+		return unsafe.Pointer(&zeroSizedAlloc)
+	}
+	gcTotalAlloc += uint64(size)
+	gcMallocs++
+
+	neededBlocks := (size + (bytesPerBlock - 1)) / bytesPerBlock
+	gcTotalBlocks += uint64(neededBlocks)
+
+	// Continue looping until a run of free blocks has been found that fits the
+	// requested size.
+	index := nextAlloc
+	numFreeBlocks := uintptr(0)
+	heapScanCount := uint8(0)
+	for {
+		if index == nextAlloc {
+			if heapScanCount == 0 {
+				heapScanCount = 1
+			} else if heapScanCount == 1 {
+				// The entire heap has been searched for free memory, but none
+				// could be found. Run a garbage collection cycle to reclaim
+				// free memory and try again.
+				heapScanCount = 2
+				freeBytes := GC()
+				heapSize := uintptr(memory.MetadataStart) - memory.HeapStart
+				if freeBytes < heapSize/3 {
+					// Ensure there is at least 33% headroom.
+					// This percentage was arbitrarily chosen, and may need to
+					// be tuned in the future.
+					growHeap()
+				}
+			} else {
+				// Even after garbage collection, no free memory could be found.
+				// Try to increase heap size.
+				if growHeap() {
+					// Success, the heap was increased in size. Try again with a
+					// larger heap.
+				} else {
+					// Unfortunately the heap could not be increased. This
+					// happens on baremetal systems for example (where all
+					// available RAM has already been dedicated to the heap).
+					printlnAndPanic("out of memory")
+				}
+			}
+		}
+
+		// Wrap around the end of the heap.
+		if index == memory.EndBlock {
+			index = 0
+			// Reset numFreeBlocks as allocations cannot wrap.
+			numFreeBlocks = 0
+			// In rare cases, the initial heap might be so small that there are
+			// no blocks at all. In this case, it's better to jump back to the
+			// start of the loop and try again, until the GC realizes there is
+			// no memory and grows the heap.
+			// This can sometimes happen on WebAssembly, where the initial heap
+			// is created by whatever is left on the last memory page.
+			continue
+		}
+
+		// Is the block we're looking at free?
+		if gcStateOf(index) != blockStateFree {
+			// This block is in use. Try again from this point.
+			numFreeBlocks = 0
+			index++
+			continue
+		}
+		numFreeBlocks++
+		index++
+
+		// Are we finished?
+		if numFreeBlocks == neededBlocks {
+			// Found a big enough range of free blocks!
+			nextAlloc = index
+			thisAlloc := index - neededBlocks
+
+			// Set the following blocks as being allocated.
+			gcSetState(thisAlloc, blockStateHead)
+			for i := thisAlloc + 1; i != nextAlloc; i++ {
+				gcSetState(i, blockStateTail)
+			}
+
+			// Return a pointer to this allocation.
+			return gcPointerOf(thisAlloc)
+		}
+	}
+}
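The size rounding at the top of `alloc` is plain ceiling division: with 16-byte blocks (a 32-bit target, an assumption for the numbers), a 20-byte request needs 2 blocks. The scan that follows is first-fit from `nextAlloc`, resetting the free-run counter whenever a used block interrupts it:

```go
// Worked example of the block rounding in alloc, assuming bytesPerBlock = 16.
package main

import "fmt"

func main() {
	const bytesPerBlock = 16
	for _, size := range []uint{1, 16, 20, 33} {
		neededBlocks := (size + (bytesPerBlock - 1)) / bytesPerBlock
		fmt.Println(size, "bytes ->", neededBlocks, "block(s)") // 1->1, 16->1, 20->2, 33->3
	}
}
```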
+
+func realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
+	if ptr == nil {
+		return alloc(size)
+	}
+
+	ptrAddress := uintptr(ptr)
+	endOfTailAddress := gcAddressOf(gcFindNext(blockFromAddr(ptrAddress)))
+
+	// this might be a few bytes longer than the original size of
+	// ptr, because we align to full blocks of size bytesPerBlock
+	oldSize := endOfTailAddress - ptrAddress
+	if size <= oldSize {
+		return ptr
+	}
+
+	newAlloc := alloc(size)
+	c.Memcpy(newAlloc, ptr, oldSize)
+	free(ptr)
+
+	return newAlloc
+}
+
+func free(ptr unsafe.Pointer) {
+	// TODO: free blocks on request, when the compiler knows they're unused.
+}
+
+// GC performs a garbage collection cycle. It is the internal implementation
+// of the runtime.GC() function. Unlike that function, it returns the number
+// of free bytes in the heap after the GC is finished.
+func GC() (freeBytes uintptr) {
+	if gcDebug {
+		println("running collection cycle...")
+	}
+
+	// Mark phase: mark all reachable objects, recursively.
+	gcMarkReachable()
+
+	finishMark()
+
+	// If we're using threads, resume all other threads before starting the
+	// sweep.
+	gcResumeWorld()
+
+	// Sweep phase: free all non-marked objects and unmark marked objects for
+	// the next collection cycle.
+	freeBytes = sweep()
+
+	return
+}
+
+// markRoots reads all pointers from start to end (exclusive) and if they look
+// like a heap pointer and are unmarked, marks them and scans that object as
+// well (recursively). The start and end parameters must be valid pointers and
+// must be aligned.
+func markRoots(start, end uintptr) {
+
+	if true {
+		if start >= end {
+			printlnAndPanic("gc: unexpected range to mark")
+		}
+		if start%unsafe.Alignof(start) != 0 {
+			printlnAndPanic("gc: unaligned start pointer")
+		}
+		if end%unsafe.Alignof(end) != 0 {
+			printlnAndPanic("gc: unaligned end pointer")
+		}
+	}

+	// Reduce the end bound to avoid reading too far on platforms where pointer
+	// alignment is smaller than pointer size.
+	// If the size of the range is 0, then end will be slightly below start after this.
+	end -= unsafe.Sizeof(end) - unsafe.Alignof(end)
+
+	for addr := start; addr < end; addr += unsafe.Alignof(addr) {
+		root := *(*uintptr)(unsafe.Pointer(addr))
+		markRoot(addr, root)
+	}
+}
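The end-bound adjustment in `markRoots` only changes anything on targets where pointer alignment is smaller than pointer size; there it keeps the last whole-word load inside the range (and makes a zero-length range skip the loop entirely, since `end` wraps below `start`). An illustration with assumed sizes (4-byte loads, 2-byte alignment), not the values of any particular real target:

```go
package main

import "fmt"

func main() {
	const sizeofPtr, alignofPtr = 4, 2 // assumption for illustration
	start, end := uint32(0x100), uint32(0x106)

	// Without the adjustment, the loop would load 4 bytes at 0x104,
	// reading 0x104..0x107 -- one word past the range end 0x106.
	end -= sizeofPtr - alignofPtr // end is now 0x104

	for addr := start; addr < end; addr += alignofPtr {
		fmt.Printf("load 4 bytes at %#x\n", addr) // 0x100, 0x102 only
	}
}
```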
+
+// startMark starts the marking process on a root and all of its children.
+func startMark(root uintptr) {
+	var stack [markStackSize]uintptr
+	stack[0] = root
+	gcSetState(root, blockStateMark)
+	stackLen := 1
+	for stackLen > 0 {
+		// Pop a block off of the stack.
+		stackLen--
+		block := stack[stackLen]
+
+		start, end := gcAddressOf(block), gcAddressOf(gcFindNext(block))
+
+		for addr := start; addr != end; addr += unsafe.Alignof(addr) {
+			// Load the word.
+			word := *(*uintptr)(unsafe.Pointer(addr))
+
+			if !isOnHeap(word) {
+				// Not a heap pointer.
+				continue
+			}
+
+			// Find the corresponding memory block.
+			referencedBlock := blockFromAddr(word)
+
+			if gcStateOf(referencedBlock) == blockStateFree {
+				// The to-be-marked object doesn't actually exist.
+				// This is probably a false positive.
+				continue
+			}
+
+			// Move to the block's head.
+			referencedBlock = gcFindHead(referencedBlock)
+
+			if gcStateOf(referencedBlock) == blockStateMark {
+				// The block has already been marked by something else.
+				continue
+			}
+
+			// Mark block.
+			gcSetState(referencedBlock, blockStateMark)
+
+			if gcDebug {
+				println("mark: %lx from %lx", gcPointerOf(referencedBlock), gcPointerOf(root))
+			}
+
+			if stackLen == len(stack) {
+				// The stack is full.
+				// It is necessary to rescan all marked blocks once we are done.
+				markStackOverflow = true
+				if gcDebug {
+					println("gc stack overflowed")
+				}
+				continue
+			}
+
+			// Push the pointer onto the stack to be scanned later.
+			stack[stackLen] = referencedBlock
+			stackLen++
+		}
+	}
+}
+
+// finishMark finishes the marking process by processing all stack overflows.
+func finishMark() {
+	for markStackOverflow {
+		// Re-mark all blocks.
+		markStackOverflow = false
+		for block := uintptr(0); block < memory.EndBlock; block++ {
+			if gcStateOf(block) != blockStateMark {
+				// Block is not marked, so we do not need to rescan it.
+				continue
+			}
+
+			// Re-mark the block.
+			startMark(block)
+		}
+	}
+}
+
+// markRoot marks a GC root at the address addr.
+func markRoot(addr, root uintptr) {
+	if isOnHeap(root) {
+		if gcDebug {
+			println("on the heap: %lx", gcPointerOf(root))
+		}
+		block := blockFromAddr(root)
+		if gcStateOf(block) == blockStateFree {
+			// The to-be-marked object doesn't actually exist. This could be a
+			// dangling pointer (oops!), but is most likely just a false positive.
+			return
+		}
+		head := gcFindHead(block)
+
+		if gcStateOf(head) != blockStateMark {
+			startMark(head)
+		}
+	}
+}
+
+// sweep goes through all memory and frees unmarked memory.
+// It returns how many bytes are free in the heap after the sweep.
+func sweep() (freeBytes uintptr) {
+	freeCurrentObject := false
+	var freed uint64
+
+	var from uintptr
+	for block := uintptr(0); block < memory.EndBlock; block++ {
+		switch gcStateOf(block) {
+		case blockStateHead:
+			// Unmarked head. Free it, including all tail blocks following it.
+			gcMarkFree(block)
+			freeCurrentObject = true
+			gcFrees++
+			freed++
+			from = block
+		case blockStateTail:
+			if freeCurrentObject {
+				// This is a tail object following an unmarked head.
+				// Free it now.
+				gcMarkFree(block)
+				freed++
+			}
+			if gcDebug {
+				println("free from %lx to %lx", gcPointerOf(from), gcPointerOf(block))
+			}
+		case blockStateMark:
+			// This is a marked object. The next tail blocks must not be freed,
+			// but the mark bit must be removed so the next GC cycle will
+			// collect this object if it is unreferenced then.
+			gcUnmark(block)
+			freeCurrentObject = false
+		case blockStateFree:
+			freeBytes += bytesPerBlock
+		}
+	}
+	gcFreedBlocks += freed
+	freeBytes += uintptr(freed) * bytesPerBlock
+	return
+}
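The sweep is a one-pass state machine over the block metadata: an unmarked head frees itself and every tail after it, while a marked head survives and is demoted back to head so the next cycle can collect it if it becomes unreachable. A standalone trace over a six-block heap:

```go
// Trace of the sweep state machine: H = unmarked head, T = tail,
// M = marked head, F = free.
package main

import "fmt"

const (
	free uint8 = iota // 00
	head              // 01
	tail              // 10
	mark              // 11
)

func sweep(blocks []uint8) {
	freeCurrent := false
	for i, s := range blocks {
		switch s {
		case head: // unmarked: garbage
			blocks[i] = free
			freeCurrent = true
		case tail:
			if freeCurrent { // tail of a freed object
				blocks[i] = free
			}
		case mark: // survives; demote to head for the next cycle
			blocks[i] = head
			freeCurrent = false
		}
	}
}

func main() {
	heap := []uint8{head, tail, tail, mark, tail, free}
	sweep(heap)
	fmt.Println(heap) // [0 0 0 1 2 0], i.e. F F F H T F
}
```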
+
+// growHeap tries to grow the heap size. It returns true if it succeeds, false
+// otherwise.
+func growHeap() bool {
+	// On baremetal, there is no way the heap can be grown.
+	return false
+}
+
+func gcMarkReachable() {
+	// getsp() is a compiler trick to get the current SP.
+	if gcDebug {
+		println("scan stack", unsafe.Pointer(getsp()), unsafe.Pointer(memory.StackTop))
+	}
+	markRoots(uintptr(getsp()), memory.StackTop)
+	if gcDebug {
+		println("scan global", unsafe.Pointer(memory.GlobalsStart), unsafe.Pointer(memory.GlobalsEnd))
+	}
+
+	markRoots(memory.GlobalsStart, memory.GlobalsEnd)
+}
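`gcMarkReachable` scans conservatively: every word in the live stack range `[getsp(), StackTop)` and in `[GlobalsStart, GlobalsEnd)` that happens to fall inside the heap is treated as a potential root. This assumes a downward-growing stack, so the range from the current stack pointer up to `_stack_top` covers all live frames. A toy version over fake memory:

```go
// Conservative root scan over a fake, downward-growing stack: every word in
// [sp, stackTop) that looks like a heap address is treated as a root.
// All addresses here are made up for illustration.
package main

import "fmt"

func main() {
	const heapLo, heapHi = 0x2000, 0x3000
	// Fake stack words, index 0 = current sp, last index = just below stack top.
	stack := []uint32{0x2040, 0xDEAD, 0x20F0, 0x7FFF}

	for _, word := range stack {
		if word >= heapLo && word < heapHi {
			fmt.Printf("root candidate: %#x\n", word) // 0x2040, 0x20f0
		}
	}
}
```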
+
+func gcResumeWorld() {
+	// Nothing to do here (single threaded).
+}

runtime/internal/runtime/tinygogc/gc.go (new file, 9 lines)
@@ -0,0 +1,9 @@
+package tinygogc
+
+import "github.com/goplus/llgo/runtime/internal/runtime"
+
+const LLGoPackage = "noinit"
+
+func GC() {
+	runtime.GC()
+}
runtime/internal/runtime/tinygogc/memory/memory.go (new file, 71 lines)
@@ -0,0 +1,71 @@
+//go:build baremetal
+
+package memory
+
+import "unsafe"
+
+// no init function; we don't want to run initialization twice
+const LLGoPackage = "noinit"
+
+//go:linkname _heapStart _heapStart
+var _heapStart [0]byte
+
+//go:linkname _heapEnd _heapEnd
+var _heapEnd [0]byte
+
+//go:linkname _stackStart _stack_top
+var _stackStart [0]byte
+
+//go:linkname _globals_start _globals_start
+var _globals_start [0]byte
+
+//go:linkname _globals_end _globals_end
+var _globals_end [0]byte
+
+// Since we don't have an init() function, these are initialized by initGC(),
+// which is called from the <main> entry wrapper.
+var (
+	HeapStart     uintptr        // start address of heap area
+	HeapEnd       uintptr        // end address of heap area
+	GlobalsStart  uintptr        // start address of global variable area
+	GlobalsEnd    uintptr        // end address of global variable area
+	StackTop      uintptr        // the top of stack
+	EndBlock      uintptr        // GC end block index
+	MetadataStart unsafe.Pointer // start address of GC metadata
+)
+
+// Some globals + constants for the entire GC.
+
+const (
+	wordsPerBlock      = 4 // number of pointers in an allocated block
+	bytesPerBlock      = wordsPerBlock * unsafe.Sizeof(HeapStart)
+	stateBits          = 2 // how many bits a block state takes (see blockState type)
+	blocksPerStateByte = 8 / stateBits
+	markStackSize      = 8 * unsafe.Sizeof((*int)(nil)) // number of to-be-marked blocks to queue before forcing a rescan
+)
+
+// zeroSizedAlloc is just a sentinel that gets returned when allocating 0 bytes.
+var zeroSizedAlloc uint8
+
+// While initGC() is executing, there must not be any allocations, so we use
+// go:linkname here instead of importing clite.
+//
+//go:linkname memset C.memset
+func memset(unsafe.Pointer, int, uintptr)
+
+// This function MUST run first, before the runtime is initialized.
+//
+//export initGC
+func initGC() {
+	// reserve 2KB at the start of the heap for malloc
+	HeapStart = uintptr(unsafe.Pointer(&_heapStart)) + 2048
+	HeapEnd = uintptr(unsafe.Pointer(&_heapEnd))
+	GlobalsStart = uintptr(unsafe.Pointer(&_globals_start))
+	GlobalsEnd = uintptr(unsafe.Pointer(&_globals_end))
+	totalSize := HeapEnd - HeapStart
+	metadataSize := (totalSize + blocksPerStateByte*bytesPerBlock) / (1 + blocksPerStateByte*bytesPerBlock)
+	MetadataStart = unsafe.Pointer(HeapEnd - metadataSize)
+	EndBlock = (uintptr(MetadataStart) - HeapStart) / bytesPerBlock
+	StackTop = uintptr(unsafe.Pointer(&_stackStart))
+
+	memset(MetadataStart, 0, metadataSize)
+}
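The `metadataSize` formula in `initGC` is a ceiling division: one metadata byte describes `blocksPerStateByte * bytesPerBlock` heap bytes (4 * 16 = 64 on a 32-bit target), so carving `metadataSize` bytes out of `totalSize` must satisfy `metadataSize * 64 >= totalSize - metadataSize`, i.e. `metadataSize = ceil(totalSize / 65)`. A worked check, with a hypothetical region size (the real one depends on the linker symbols):

```go
package main

import "fmt"

func main() {
	const cover = 4 * 16                // heap bytes described by one metadata byte (32-bit assumption)
	totalSize := uint32(0x52000 - 2048) // hypothetical: ESP32 dram_seg minus the 2KB malloc reserve

	metadataSize := (totalSize + cover) / (1 + cover) // ceil(totalSize / 65)
	heapBytes := totalSize - metadataSize

	fmt.Println(metadataSize, heapBytes)
	// The metadata can describe at least the remaining heap:
	fmt.Println(metadataSize*cover >= heapBytes) // true
}
```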
@@ -1,5 +1,4 @@
-//go:build !nogc
-// +build !nogc
+//go:build !nogc && !baremetal
 
 /*
  * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.

runtime/internal/runtime/z_gc_baremetal.go (new file, 36 lines)
@@ -0,0 +1,36 @@
+//go:build !nogc && baremetal
+
+/*
+ * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package runtime
+
+import (
+	"unsafe"
+
+	c "github.com/goplus/llgo/runtime/internal/clite"
+)
+
+// AllocU allocates uninitialized memory.
+func AllocU(size uintptr) unsafe.Pointer {
+	return alloc(size)
+}
+
+// AllocZ allocates zero-initialized memory.
+func AllocZ(size uintptr) unsafe.Pointer {
+	ptr := alloc(size)
+	return c.Memset(ptr, 0, size)
+}
@@ -1,6 +1,4 @@
-__stack = ORIGIN(dram_seg) + LENGTH(dram_seg);
-__MIN_STACK_SIZE = 0x1000;
-_stack_top = __stack;
+_heapEnd = ORIGIN(dram_seg) + LENGTH(dram_seg);
 
 /* Default entry point */
 ENTRY(_start)
@@ -104,6 +102,14 @@ SECTIONS
     . += ORIGIN(iram_seg) == ORIGIN(dram_seg) ? 0 : _iram_end - _iram_start;
   } > dram_seg
 
+  .stack (NOLOAD) :
+  {
+    __stack_end = .;
+    . = ALIGN(16);
+    . += 16K;
+    __stack = .;
+  }
+
   .data :
   {
     _data_start = .;
@@ -148,7 +154,7 @@ SECTIONS
   } > dram_seg
 
 /* Check if data + heap + stack exceeds RAM limit */
-ASSERT(_end <= __stack - __MIN_STACK_SIZE, "region DRAM overflowed by .data and .bss sections")
+ASSERT(_end <= _heapEnd, "region DRAM overflowed by .data and .bss sections")
 
 /* Stabs debugging sections. */
 .stab 0 : { *(.stab) }
@@ -193,3 +199,8 @@ SECTIONS
 .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
 /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) }
 }
+
+_globals_start = _data_start;
+_globals_end = _end;
+_heapStart = _end;
+_stack_top = __stack;
@@ -1,5 +1,4 @@
-__stack = ORIGIN(dram_seg) + LENGTH(dram_seg);
-__MIN_STACK_SIZE = 0x2000;
+_heapEnd = ORIGIN(dram_seg) + LENGTH(dram_seg);
 
 ENTRY(_start)
 SECTIONS
@@ -26,6 +25,14 @@ SECTIONS
      the same address within the page on the next page up. */
   . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
 
+  .stack (NOLOAD) :
+  {
+    __stack_end = .;
+    . = ALIGN(16);
+    . += 16K;
+    __stack = .;
+  }
+
 
   .rodata :
   {
@@ -116,7 +123,7 @@ SECTIONS
 . = DATA_SEGMENT_END (.);
 
 /* Check if data + heap + stack exceeds RAM limit */
-ASSERT(. <= __stack - __MIN_STACK_SIZE, "region DRAM overflowed by .data and .bss sections")
+ASSERT(. <= _heapEnd, "region DRAM overflowed by .data and .bss sections")
 
 /* Stabs debugging sections. */
 .stab 0 : { *(.stab) }
@@ -165,4 +172,7 @@ SECTIONS
 
 _sbss = __bss_start;
 _ebss = _end;
+_globals_start = _data_start;
+_globals_end = _end;
+_heapStart = _end;
+_stack_top = __stack;
@@ -19,8 +19,8 @@ MEMORY
   /* 64k at the end of DRAM, after ROM bootloader stack
    * or entire DRAM (for QEMU only)
    */
-  dram_seg (RW) : org = 0x3FFF0000 ,
-                  len = 0x10000
+  dram_seg (RW) : org = 0x3ffae000 ,
+                  len = 0x52000
 }
 
 INCLUDE "targets/esp32.app.elf.ld";
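A sanity check on the `dram_seg` change: the old and new regions end at the same address (0x40000000); only the origin moves down, growing the segment from 64 KiB to 328 KiB so it can hold the GC heap:

```go
package main

import "fmt"

func main() {
	oldOrg, oldLen := uint32(0x3FFF0000), uint32(0x10000)
	newOrg, newLen := uint32(0x3FFAE000), uint32(0x52000)

	fmt.Printf("old end %#x, new end %#x\n", oldOrg+oldLen, newOrg+newLen) // both 0x40000000
	fmt.Println(oldLen/1024, "KiB ->", newLen/1024, "KiB")                // 64 KiB -> 328 KiB
}
```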