Merge pull request #1309 from MeteorsLiu/impl-baremetal-gc

feat: implement baremetal gc

.github/workflows/llgo.yml (vendored, 8 lines changed)
@@ -61,7 +61,7 @@ jobs:
if ${{ startsWith(matrix.os, 'macos') }}; then
DEMO_PKG="cargs_darwin_arm64.zip"
else
DEMO_PKG="cargs_linux_amd64.zip"
DEMO_PKG="cargs_linux_amd64.zip"
fi

mkdir -p ./_demo/c/cargs/libs

@@ -186,11 +186,15 @@ jobs:
uses: actions/setup-go@v6
with:
go-version: ${{matrix.go}}

- name: Test Baremetal GC
if: ${{!startsWith(matrix.os, 'macos')}}
working-directory: runtime/internal/runtime/tinygogc
run: llgo test -tags testGC .
- name: run llgo test
run: |
llgo test ./...

hello:
continue-on-error: true
timeout-minutes: 30

@@ -1,5 +1,4 @@
//go:build !nogc
// +build !nogc
//go:build !nogc && !baremetal

/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.

@@ -1,5 +1,4 @@
//go:build nogc
// +build nogc
//go:build nogc || baremetal

/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.

@@ -1,4 +1,4 @@
//go:build llgo
//go:build llgo && !baremetal

/*
 * Copyright (c) 2025 The GoPlus Authors (goplus.org). All rights reserved.

@@ -1,4 +1,4 @@
//go:build llgo && !nogc
//go:build llgo && !baremetal && !nogc

/*
 * Copyright (c) 2025 The GoPlus Authors (goplus.org). All rights reserved.

@@ -1,4 +1,4 @@
//go:build llgo && nogc
//go:build llgo && (nogc || baremetal)

/*
 * Copyright (c) 2025 The GoPlus Authors (goplus.org). All rights reserved.

@@ -1,4 +1,4 @@
//go:build !llgo
//go:build !llgo || baremetal

/*
 * Copyright (c) 2025 The GoPlus Authors (goplus.org). All rights reserved.

@@ -4,8 +4,6 @@

package runtime

import "runtime"

// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)

@@ -30,10 +28,6 @@ func StopTrace() {
	panic("todo: runtime.StopTrace")
}

func ReadMemStats(m *runtime.MemStats) {
	panic("todo: runtime.ReadMemStats")
}

func SetMutexProfileFraction(rate int) int {
	panic("todo: runtime.SetMutexProfileFraction")
}

@@ -1,8 +1,16 @@
//go:build !nogc
//go:build !nogc && !baremetal

package runtime

import "github.com/goplus/llgo/runtime/internal/clite/bdwgc"
import (
	"runtime"

	"github.com/goplus/llgo/runtime/internal/clite/bdwgc"
)

func ReadMemStats(m *runtime.MemStats) {
	panic("todo: runtime.ReadMemStats")
}

func GC() {
	bdwgc.Gcollect()

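The build-constraint edits above are what select the GC backend: files that wrap bdwgc are narrowed with && !baremetal, the no-GC stubs are widened with || baremetal, and the new files below opt in with the baremetal tag. A minimal sketch of the same pattern, with invented file and function names (not the PR's actual layout), assuming the usual -tags build mechanism:

// gc_default.go
//go:build !nogc && !baremetal

package gcdemo

// collect uses the host bdwgc collector on ordinary targets.
func collect() string { return "bdwgc" }

// gc_baremetal.go
//go:build !nogc && baremetal

package gcdemo

// collect switches to the block-based tinygogc collector when built
// with -tags baremetal.
func collect() string { return "tinygogc" }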
runtime/internal/lib/runtime/runtime_gc_baremetal.go (new file, 29 lines)
@@ -0,0 +1,29 @@
//go:build !nogc && baremetal

package runtime

import (
	"runtime"

	"github.com/goplus/llgo/runtime/internal/runtime/tinygogc"
)

func ReadMemStats(m *runtime.MemStats) {
	stats := tinygogc.ReadGCStats()
	m.Alloc = stats.Alloc
	m.TotalAlloc = stats.TotalAlloc
	m.Sys = stats.Sys
	m.Mallocs = stats.Mallocs
	m.Frees = stats.Frees
	m.HeapAlloc = stats.HeapAlloc
	m.HeapSys = stats.HeapSys
	m.HeapIdle = stats.HeapIdle
	m.HeapInuse = stats.HeapInuse
	m.StackInuse = stats.StackInuse
	m.StackSys = stats.StackSys
	m.GCSys = stats.GCSys
}

func GC() {
	tinygogc.GC()
}

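With the mapping above, a program built for a baremetal target can keep using the standard accessor; the values come from tinygogc rather than bdwgc. A minimal usage sketch (assuming llgo substitutes this lib/runtime package for the standard library runtime, as the surrounding files indicate):

package main

import "runtime"

func main() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m) // served by tinygogc.ReadGCStats on baremetal builds
	println("heap in use:", m.HeapInuse, "live objects:", m.Mallocs-m.Frees)
}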
runtime/internal/runtime/tinygogc/gc.go (new file, 184 lines)
@@ -0,0 +1,184 @@
|
||||
//go:build baremetal || testGC
|
||||
|
||||
package tinygogc
|
||||
|
||||
import "unsafe"
|
||||
|
||||
type GCStats struct {
|
||||
// General statistics.
|
||||
|
||||
// Alloc is bytes of allocated heap objects.
|
||||
//
|
||||
// This is the same as HeapAlloc (see below).
|
||||
Alloc uint64
|
||||
|
||||
// TotalAlloc is cumulative bytes allocated for heap objects.
|
||||
//
|
||||
// TotalAlloc increases as heap objects are allocated, but
|
||||
// unlike Alloc and HeapAlloc, it does not decrease when
|
||||
// objects are freed.
|
||||
TotalAlloc uint64
|
||||
|
||||
// Sys is the total bytes of memory obtained from the OS.
|
||||
//
|
||||
// Sys is the sum of the XSys fields below. Sys measures the
|
||||
// virtual address space reserved by the Go runtime for the
|
||||
// heap, stacks, and other internal data structures. It's
|
||||
// likely that not all of the virtual address space is backed
|
||||
// by physical memory at any given moment, though in general
|
||||
// it all was at some point.
|
||||
Sys uint64
|
||||
|
||||
// Mallocs is the cumulative count of heap objects allocated.
|
||||
// The number of live objects is Mallocs - Frees.
|
||||
Mallocs uint64
|
||||
|
||||
// Frees is the cumulative count of heap objects freed.
|
||||
Frees uint64
|
||||
|
||||
// Heap memory statistics.
|
||||
//
|
||||
// Interpreting the heap statistics requires some knowledge of
|
||||
// how Go organizes memory. Go divides the virtual address
|
||||
// space of the heap into "spans", which are contiguous
|
||||
// regions of memory 8K or larger. A span may be in one of
|
||||
// three states:
|
||||
//
|
||||
// An "idle" span contains no objects or other data. The
|
||||
// physical memory backing an idle span can be released back
|
||||
// to the OS (but the virtual address space never is), or it
|
||||
// can be converted into an "in use" or "stack" span.
|
||||
//
|
||||
// An "in use" span contains at least one heap object and may
|
||||
// have free space available to allocate more heap objects.
|
||||
//
|
||||
// A "stack" span is used for goroutine stacks. Stack spans
|
||||
// are not considered part of the heap. A span can change
|
||||
// between heap and stack memory; it is never used for both
|
||||
// simultaneously.
|
||||
|
||||
// HeapAlloc is bytes of allocated heap objects.
|
||||
//
|
||||
// "Allocated" heap objects include all reachable objects, as
|
||||
// well as unreachable objects that the garbage collector has
|
||||
// not yet freed. Specifically, HeapAlloc increases as heap
|
||||
// objects are allocated and decreases as the heap is swept
|
||||
// and unreachable objects are freed. Sweeping occurs
|
||||
// incrementally between GC cycles, so these two processes
|
||||
// occur simultaneously, and as a result HeapAlloc tends to
|
||||
// change smoothly (in contrast with the sawtooth that is
|
||||
// typical of stop-the-world garbage collectors).
|
||||
HeapAlloc uint64
|
||||
|
||||
// HeapSys is bytes of heap memory obtained from the OS.
|
||||
//
|
||||
// HeapSys measures the amount of virtual address space
|
||||
// reserved for the heap. This includes virtual address space
|
||||
// that has been reserved but not yet used, which consumes no
|
||||
// physical memory, but tends to be small, as well as virtual
|
||||
// address space for which the physical memory has been
|
||||
// returned to the OS after it became unused (see HeapReleased
|
||||
// for a measure of the latter).
|
||||
//
|
||||
// HeapSys estimates the largest size the heap has had.
|
||||
HeapSys uint64
|
||||
|
||||
// HeapIdle is bytes in idle (unused) spans.
|
||||
//
|
||||
// Idle spans have no objects in them. These spans could be
|
||||
// (and may already have been) returned to the OS, or they can
|
||||
// be reused for heap allocations, or they can be reused as
|
||||
// stack memory.
|
||||
//
|
||||
// HeapIdle minus HeapReleased estimates the amount of memory
|
||||
// that could be returned to the OS, but is being retained by
|
||||
// the runtime so it can grow the heap without requesting more
|
||||
// memory from the OS. If this difference is significantly
|
||||
// larger than the heap size, it indicates there was a recent
|
||||
// transient spike in live heap size.
|
||||
HeapIdle uint64
|
||||
|
||||
// HeapInuse is bytes in in-use spans.
|
||||
//
|
||||
// In-use spans have at least one object in them. These spans
|
||||
// can only be used for other objects of roughly the same
|
||||
// size.
|
||||
//
|
||||
// HeapInuse minus HeapAlloc estimates the amount of memory
|
||||
// that has been dedicated to particular size classes, but is
|
||||
// not currently being used. This is an upper bound on
|
||||
// fragmentation, but in general this memory can be reused
|
||||
// efficiently.
|
||||
HeapInuse uint64
|
||||
|
||||
// Stack memory statistics.
|
||||
//
|
||||
// Stacks are not considered part of the heap, but the runtime
|
||||
// can reuse a span of heap memory for stack memory, and
|
||||
// vice-versa.
|
||||
|
||||
// StackInuse is bytes in stack spans.
|
||||
//
|
||||
// In-use stack spans have at least one stack in them. These
|
||||
// spans can only be used for other stacks of the same size.
|
||||
//
|
||||
// There is no StackIdle because unused stack spans are
|
||||
// returned to the heap (and hence counted toward HeapIdle).
|
||||
StackInuse uint64
|
||||
|
||||
// StackSys is bytes of stack memory obtained from the OS.
|
||||
//
|
||||
// StackSys is StackInuse, plus any memory obtained directly
|
||||
// from the OS for OS thread stacks.
|
||||
//
|
||||
// In non-cgo programs this metric is currently equal to StackInuse
|
||||
// (but this should not be relied upon, and the value may change in
|
||||
// the future).
|
||||
//
|
||||
// In cgo programs this metric includes OS thread stacks allocated
|
||||
// directly from the OS. Currently, this only accounts for one stack in
|
||||
// c-shared and c-archive build modes and other sources of stacks from
|
||||
// the OS (notably, any allocated by C code) are not currently measured.
|
||||
// Note this too may change in the future.
|
||||
StackSys uint64
|
||||
|
||||
// GCSys is bytes of memory in garbage collection metadata.
|
||||
GCSys uint64
|
||||
}
|
||||
|
||||
func ReadGCStats() GCStats {
	var heapInuse, heapIdle uint64

	lock(&gcMutex)

	for block := uintptr(0); block < endBlock; block++ {
		bstate := gcStateOf(block)
		if bstate == blockStateFree {
			heapIdle += uint64(bytesPerBlock)
		} else {
			heapInuse += uint64(bytesPerBlock)
		}
	}

	stackEnd := uintptr(unsafe.Pointer(&_stackEnd))
	stackSys := stackTop - stackEnd

	stats := GCStats{
		Alloc:      (gcTotalBlocks - gcFreedBlocks) * uint64(bytesPerBlock),
		TotalAlloc: gcTotalAlloc,
		Sys:        uint64(heapEnd - heapStart),
		Mallocs:    gcMallocs,
		Frees:      gcFrees,
		HeapAlloc:  (gcTotalBlocks - gcFreedBlocks) * uint64(bytesPerBlock),
		HeapSys:    heapInuse + heapIdle,
		HeapIdle:   heapIdle,
		HeapInuse:  heapInuse,
		StackInuse: uint64(stackTop - uintptr(getsp())),
		StackSys:   uint64(stackSys),
		GCSys:      uint64(heapEnd - uintptr(metadataStart)),
	}

	unlock(&gcMutex)

	return stats
}

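Because accounting is done in whole blocks, Alloc and HeapAlloc move in multiples of bytesPerBlock, while TotalAlloc tracks the bytes actually requested. A rough worked sketch under the testGC tag (hypothetical helper, not part of the PR; numbers assume a 64-bit target where bytesPerBlock = 4 words * 8 bytes = 32):

//go:build testGC

package tinygogc

// statsExample shows how the counters move for a single 100-byte allocation.
func statsExample() {
	before := ReadGCStats()
	_ = Alloc(100) // rounds up to ceil(100/32) = 4 blocks = 128 bytes
	after := ReadGCStats()

	_ = after.TotalAlloc - before.TotalAlloc // +100: bytes requested
	_ = after.HeapAlloc - before.HeapAlloc   // +128: 4 blocks of 32 bytes
	_ = after.Mallocs - before.Mallocs       // +1
}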
runtime/internal/runtime/tinygogc/gc_link.go (new file, 54 lines)
@@ -0,0 +1,54 @@
//go:build !testGC

package tinygogc

import (
	"unsafe"
	_ "unsafe"
)

// LLGoPackage instructs the LLGo linker to wrap C standard library memory allocation
// functions (malloc, realloc, calloc) so they use the tinygogc allocator instead.
// This ensures all memory allocations go through the GC, including C library calls.
const LLGoPackage = "link: --wrap=malloc --wrap=realloc --wrap=calloc"

//export __wrap_malloc
func __wrap_malloc(size uintptr) unsafe.Pointer {
	return Alloc(size)
}

//export __wrap_calloc
func __wrap_calloc(nmemb, size uintptr) unsafe.Pointer {
	totalSize := nmemb * size
	// Check for multiplication overflow
	if nmemb != 0 && totalSize/nmemb != size {
		return nil // Overflow
	}
	return Alloc(totalSize)
}

//export __wrap_realloc
func __wrap_realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
	return Realloc(ptr, size)
}

//go:linkname getsp llgo.stackSave
func getsp() unsafe.Pointer

//go:linkname _heapStart _heapStart
var _heapStart [0]byte

//go:linkname _heapEnd _heapEnd
var _heapEnd [0]byte

//go:linkname _stackStart _stack_top
var _stackStart [0]byte

//go:linkname _stackEnd _stack_end
var _stackEnd [0]byte

//go:linkname _globals_start _globals_start
var _globals_start [0]byte

//go:linkname _globals_end _globals_end
var _globals_end [0]byte

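The --wrap flags use the standard GNU ld wrapping convention: after linking, calls to malloc resolve to __wrap_malloc above, while the original allocator stays reachable as __real_malloc. free is deliberately left unwrapped here because tinygogc only reclaims memory during collection cycles; if it ever were wrapped, the sketch would look like this (hypothetical, not part of the PR, and it would also need --wrap=free added to LLGoPackage):

//go:build !testGC

package tinygogc

import "unsafe"

//export __wrap_free
func __wrap_free(ptr unsafe.Pointer) {
	// With a mark-and-sweep collector, memory is reclaimed during sweep,
	// so an explicit free can safely be a no-op.
	_ = ptr
}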
runtime/internal/runtime/tinygogc/gc_test.go (new file, 25 lines)
@@ -0,0 +1,25 @@
//go:build testGC

package tinygogc

import (
	_ "unsafe"
)

var currentStack uintptr

func getsp() uintptr {
	return currentStack
}

var _heapStart [0]byte

var _heapEnd [0]byte

var _stackStart [0]byte

var _stackEnd [0]byte

var _globals_start [0]byte

var _globals_end [0]byte

runtime/internal/runtime/tinygogc/gc_tinygo.go (new file, 570 lines)
@@ -0,0 +1,570 @@
//go:build baremetal || testGC

/*
 * Copyright (c) 2018-2025 The TinyGo Authors. All rights reserved.
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package tinygogc implements a conservative mark-and-sweep garbage collector
// for baremetal environments where the standard Go runtime and bdwgc are unavailable.
//
// This implementation is based on TinyGo's GC and is designed for resource-constrained
// embedded systems. It uses a block-based allocator with conservative pointer scanning.
//
// Build tags:
//   - baremetal: Enables this GC for baremetal targets
//   - testGC: Enables testing mode with mock implementations
//
// Memory Layout:
// The heap is divided into fixed-size blocks (32 bytes on 64-bit). Metadata is stored
// at the end of the heap, using 2 bits per block to track state (free/head/tail/mark).
package tinygogc

import (
	"unsafe"

	c "github.com/goplus/llgo/runtime/internal/clite"
)

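The 2-bits-per-block bookkeeping fixes how much of the managed region must be reserved for metadata: each metadata byte describes blocksPerStateByte = 4 blocks, i.e. 4 * bytesPerBlock = 128 heap bytes on a 64-bit target, so a region of totalSize bytes splits into heap plus roughly totalSize / (1 + blocksPerStateByte*bytesPerBlock) bytes of metadata, which is the expression initGC uses further down. An in-package sketch with illustrative numbers only:

// For a 126 KiB managed region on a 64-bit target:
//   bytesPerBlock      = 4 * 8 = 32
//   blocksPerStateByte = 8 / 2 = 4
//   metadataSize ≈ 129024 / (1 + 4*32) ≈ 1000 bytes
//   usable heap  ≈ 128 KB, i.e. about 4000 blocks
func metadataSizeFor(totalSize uintptr) uintptr {
	// same formula as initGC, rounded up so every block has a state entry
	return (totalSize + blocksPerStateByte*bytesPerBlock) / (1 + blocksPerStateByte*bytesPerBlock)
}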
const gcDebug = false
|
||||
|
||||
// blockState stores the four states in which a block can be. It is two bits in
|
||||
// size.
|
||||
const (
|
||||
blockStateFree uint8 = 0 // 00
|
||||
blockStateHead uint8 = 1 // 01
|
||||
blockStateTail uint8 = 2 // 10
|
||||
blockStateMark uint8 = 3 // 11
|
||||
blockStateMask uint8 = 3 // 11
|
||||
)
|
||||
|
||||
// The byte value of a block where every block is a 'tail' block.
|
||||
const blockStateByteAllTails = 0 |
|
||||
uint8(blockStateTail<<(stateBits*3)) |
|
||||
uint8(blockStateTail<<(stateBits*2)) |
|
||||
uint8(blockStateTail<<(stateBits*1)) |
|
||||
uint8(blockStateTail<<(stateBits*0))
|
||||
|
||||
var (
|
||||
heapStart uintptr // start address of heap area
|
||||
heapEnd uintptr // end address of heap area
|
||||
globalsStart uintptr // start address of global variable area
|
||||
globalsEnd uintptr // end address of global variable area
|
||||
stackTop uintptr // the top of stack
|
||||
endBlock uintptr // GC end block index
|
||||
metadataStart unsafe.Pointer // start address of GC metadata
|
||||
|
||||
nextAlloc uintptr // the next block that should be tried by the allocator
|
||||
gcTotalAlloc uint64 // total number of bytes allocated
|
||||
gcTotalBlocks uint64 // total number of allocated blocks
|
||||
gcMallocs uint64 // total number of allocations
|
||||
gcFrees uint64 // total number of objects freed
|
||||
gcFreedBlocks uint64 // total number of freed blocks
|
||||
|
||||
// stackOverflow is a flag which is set when the GC scans too deep while marking.
|
||||
// After it is set, all marked allocations must be re-scanned.
|
||||
markStackOverflow bool
|
||||
|
||||
// zeroSizedAlloc is just a sentinel that gets returned when allocating 0 bytes.
|
||||
zeroSizedAlloc uint8
|
||||
|
||||
gcMutex mutex // gcMutex protects GC related variables
|
||||
isGCInit bool // isGCInit indicates GC initialization state
|
||||
)
|
||||
|
||||
// Some globals + constants for the entire GC.
|
||||
|
||||
const (
|
||||
wordsPerBlock = 4 // number of pointers in an allocated block
|
||||
bytesPerBlock = wordsPerBlock * unsafe.Sizeof(heapStart)
|
||||
stateBits = 2 // how many bits a block state takes (see blockState type)
|
||||
blocksPerStateByte = 8 / stateBits
|
||||
markStackSize = 8 * unsafe.Sizeof((*int)(nil)) // number of to-be-marked blocks to queue before forcing a rescan
|
||||
)
|
||||
|
||||
// initGC MUST run first: it has to be called before the rest of the runtime is initialized.
|
||||
func initGC() {
|
||||
// reserve 2 KB at the start of the heap for libc's internal malloc; those internal allocations cannot be wrapped
|
||||
heapStart = uintptr(unsafe.Pointer(&_heapStart)) + 2048
|
||||
heapEnd = uintptr(unsafe.Pointer(&_heapEnd))
|
||||
globalsStart = uintptr(unsafe.Pointer(&_globals_start))
|
||||
globalsEnd = uintptr(unsafe.Pointer(&_globals_end))
|
||||
totalSize := heapEnd - heapStart
|
||||
metadataSize := (totalSize + blocksPerStateByte*bytesPerBlock) / (1 + blocksPerStateByte*bytesPerBlock)
|
||||
metadataStart = unsafe.Pointer(heapEnd - metadataSize)
|
||||
endBlock = (uintptr(metadataStart) - heapStart) / bytesPerBlock
|
||||
stackTop = uintptr(unsafe.Pointer(&_stackStart))
|
||||
|
||||
c.Memset(metadataStart, 0, metadataSize)
|
||||
}
|
||||
|
||||
func lazyInit() {
|
||||
if !isGCInit {
|
||||
initGC()
|
||||
isGCInit = true
|
||||
}
|
||||
}
|
||||
|
||||
func gcPanic(s *c.Char) {
|
||||
c.Printf(c.Str("%s"), s)
|
||||
c.Exit(2)
|
||||
}
|
||||
|
||||
// blockFromAddr returns a block given an address somewhere in the heap (which
|
||||
// might not be heap-aligned).
|
||||
func blockFromAddr(addr uintptr) uintptr {
|
||||
if addr < heapStart || addr >= uintptr(metadataStart) {
|
||||
gcPanic(c.Str("gc: trying to get block from invalid address"))
|
||||
}
|
||||
return (addr - heapStart) / bytesPerBlock
|
||||
}
|
||||
|
||||
// Return a pointer to the start of the allocated object.
|
||||
func gcPointerOf(blockAddr uintptr) unsafe.Pointer {
|
||||
return unsafe.Pointer(gcAddressOf(blockAddr))
|
||||
}
|
||||
|
||||
// Return the address of the start of the allocated object.
|
||||
func gcAddressOf(blockAddr uintptr) uintptr {
|
||||
addr := heapStart + blockAddr*bytesPerBlock
|
||||
if addr > uintptr(metadataStart) {
|
||||
gcPanic(c.Str("gc: block pointing inside metadata"))
|
||||
}
|
||||
return addr
|
||||
}
|
||||
|
||||
// findHead returns the head (first block) of an object, assuming the block
|
||||
// points to an allocated object. It returns the same block if this block
|
||||
// already points to the head.
|
||||
func gcFindHead(blockAddr uintptr) uintptr {
|
||||
for {
|
||||
// Optimization: check whether the current block state byte (which
|
||||
// contains the state of multiple blocks) is composed entirely of tail
|
||||
// blocks. If so, we can skip back to the last block in the previous
|
||||
// state byte.
|
||||
// This optimization speeds up findHead for pointers that point into a
|
||||
// large allocation.
|
||||
stateByte := gcStateByteOf(blockAddr)
|
||||
if stateByte == blockStateByteAllTails {
|
||||
blockAddr -= (blockAddr % blocksPerStateByte) + 1
|
||||
continue
|
||||
}
|
||||
|
||||
// Check whether we've found a non-tail block, which means we found the
|
||||
// head.
|
||||
state := gcStateFromByte(blockAddr, stateByte)
|
||||
if state != blockStateTail {
|
||||
break
|
||||
}
|
||||
blockAddr--
|
||||
}
|
||||
if gcStateOf(blockAddr) != blockStateHead && gcStateOf(blockAddr) != blockStateMark {
|
||||
gcPanic(c.Str("gc: found tail without head"))
|
||||
}
|
||||
return blockAddr
|
||||
}
|
||||
|
||||
// findNext returns the first block just past the end of the tail. This may or
|
||||
// may not be the head of an object.
|
||||
func gcFindNext(blockAddr uintptr) uintptr {
|
||||
if gcStateOf(blockAddr) == blockStateHead || gcStateOf(blockAddr) == blockStateMark {
|
||||
blockAddr++
|
||||
}
|
||||
for gcAddressOf(blockAddr) < uintptr(metadataStart) && gcStateOf(blockAddr) == blockStateTail {
|
||||
blockAddr++
|
||||
}
|
||||
return blockAddr
|
||||
}
|
||||
|
||||
func gcStateByteOf(blockAddr uintptr) byte {
|
||||
return *(*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
|
||||
}
|
||||
|
||||
// Return the block state given a state byte. The state byte must have been
|
||||
// obtained using b.stateByte(), otherwise the result is incorrect.
|
||||
func gcStateFromByte(blockAddr uintptr, stateByte byte) uint8 {
|
||||
return uint8(stateByte>>((blockAddr%blocksPerStateByte)*stateBits)) & blockStateMask
|
||||
}
|
||||
|
||||
// State returns the current block state.
|
||||
func gcStateOf(blockAddr uintptr) uint8 {
|
||||
return gcStateFromByte(blockAddr, gcStateByteOf(blockAddr))
|
||||
}
|
||||
|
||||
// setState sets the current block to the given state, which must contain more
|
||||
// bits than the current state. Allowed transitions: from free to any state and
|
||||
// from head to mark.
|
||||
func gcSetState(blockAddr uintptr, newState uint8) {
|
||||
stateBytePtr := (*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
|
||||
*stateBytePtr |= uint8(newState << ((blockAddr % blocksPerStateByte) * stateBits))
|
||||
if gcStateOf(blockAddr) != newState {
|
||||
gcPanic(c.Str("gc: setState() was not successful"))
|
||||
}
|
||||
}
|
||||
|
||||
// markFree sets the block state to free, no matter what state it was in before.
|
||||
func gcMarkFree(blockAddr uintptr) {
|
||||
stateBytePtr := (*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
|
||||
*stateBytePtr &^= uint8(blockStateMask << ((blockAddr % blocksPerStateByte) * stateBits))
|
||||
if gcStateOf(blockAddr) != blockStateFree {
|
||||
gcPanic(c.Str("gc: markFree() was not successful"))
|
||||
}
|
||||
*(*[wordsPerBlock]uintptr)(unsafe.Pointer(gcAddressOf(blockAddr))) = [wordsPerBlock]uintptr{}
|
||||
}
|
||||
|
||||
// unmark changes the state of the block from mark to head. It must be marked
|
||||
// before calling this function.
|
||||
func gcUnmark(blockAddr uintptr) {
|
||||
if gcStateOf(blockAddr) != blockStateMark {
|
||||
gcPanic(c.Str("gc: unmark() on a block that is not marked"))
|
||||
}
|
||||
clearMask := blockStateMask ^ blockStateHead // the bits to clear from the state
|
||||
stateBytePtr := (*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
|
||||
*stateBytePtr &^= uint8(clearMask << ((blockAddr % blocksPerStateByte) * stateBits))
|
||||
if gcStateOf(blockAddr) != blockStateHead {
|
||||
gcPanic(c.Str("gc: unmark() was not successful"))
|
||||
}
|
||||
}
|
||||
|
||||
func isOnHeap(ptr uintptr) bool {
|
||||
return ptr >= heapStart && ptr < uintptr(metadataStart)
|
||||
}
|
||||
|
||||
func isPointer(ptr uintptr) bool {
|
||||
// TODO: implement precise GC
|
||||
return isOnHeap(ptr)
|
||||
}
|
||||
|
||||
// alloc tries to find some free space on the heap, possibly doing a garbage
|
||||
// collection cycle if needed. If no space is free, it panics.
|
||||
//
|
||||
//go:noinline
|
||||
func Alloc(size uintptr) unsafe.Pointer {
|
||||
if size == 0 {
|
||||
return unsafe.Pointer(&zeroSizedAlloc)
|
||||
}
|
||||
lock(&gcMutex)
|
||||
lazyInit()
|
||||
|
||||
gcTotalAlloc += uint64(size)
|
||||
gcMallocs++
|
||||
|
||||
neededBlocks := (size + (bytesPerBlock - 1)) / bytesPerBlock
|
||||
gcTotalBlocks += uint64(neededBlocks)
|
||||
|
||||
// Continue looping until a run of free blocks has been found that fits the
|
||||
// requested size.
|
||||
index := nextAlloc
|
||||
numFreeBlocks := uintptr(0)
|
||||
heapScanCount := uint8(0)
|
||||
for {
|
||||
if index == nextAlloc {
|
||||
if heapScanCount == 0 {
|
||||
heapScanCount = 1
|
||||
} else if heapScanCount == 1 {
|
||||
// The entire heap has been searched for free memory, but none
|
||||
// could be found. Run a garbage collection cycle to reclaim
|
||||
// free memory and try again.
|
||||
heapScanCount = 2
|
||||
freeBytes := gc()
|
||||
heapSize := uintptr(metadataStart) - heapStart
|
||||
if freeBytes < heapSize/3 {
|
||||
// Ensure there is at least 33% headroom.
|
||||
// This percentage was arbitrarily chosen, and may need to
|
||||
// be tuned in the future.
|
||||
growHeap()
|
||||
}
|
||||
} else {
|
||||
// Even after garbage collection, no free memory could be found.
|
||||
// Try to increase heap size.
|
||||
if growHeap() {
|
||||
// Success, the heap was increased in size. Try again with a
|
||||
// larger heap.
|
||||
} else {
|
||||
// Unfortunately the heap could not be increased. This
|
||||
// happens on baremetal systems for example (where all
|
||||
// available RAM has already been dedicated to the heap).
|
||||
gcPanic(c.Str("out of memory"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap around the end of the heap.
|
||||
if index == endBlock {
|
||||
index = 0
|
||||
// Reset numFreeBlocks as allocations cannot wrap.
|
||||
numFreeBlocks = 0
|
||||
// In rare cases, the initial heap might be so small that there are
|
||||
// no blocks at all. In this case, it's better to jump back to the
|
||||
// start of the loop and try again, until the GC realizes there is
|
||||
// no memory and grows the heap.
|
||||
// This can sometimes happen on WebAssembly, where the initial heap
|
||||
// is created by whatever is left on the last memory page.
|
||||
continue
|
||||
}
|
||||
|
||||
// Is the block we're looking at free?
|
||||
if gcStateOf(index) != blockStateFree {
|
||||
// This block is in use. Try again from this point.
|
||||
numFreeBlocks = 0
|
||||
index++
|
||||
continue
|
||||
}
|
||||
numFreeBlocks++
|
||||
index++
|
||||
|
||||
// Are we finished?
|
||||
if numFreeBlocks == neededBlocks {
|
||||
// Found a big enough range of free blocks!
|
||||
nextAlloc = index
|
||||
thisAlloc := index - neededBlocks
|
||||
|
||||
// Set the following blocks as being allocated.
|
||||
gcSetState(thisAlloc, blockStateHead)
|
||||
for i := thisAlloc + 1; i != nextAlloc; i++ {
|
||||
gcSetState(i, blockStateTail)
|
||||
}
|
||||
unlock(&gcMutex)
|
||||
// Return a pointer to this allocation.
|
||||
return c.Memset(gcPointerOf(thisAlloc), 0, size)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
|
||||
if ptr == nil {
|
||||
return Alloc(size)
|
||||
}
|
||||
lock(&gcMutex)
|
||||
lazyInit()
|
||||
unlock(&gcMutex)
|
||||
|
||||
ptrAddress := uintptr(ptr)
|
||||
endOfTailAddress := gcAddressOf(gcFindNext(blockFromAddr(ptrAddress)))
|
||||
|
||||
// this might be a few bytes longer than the original size of
|
||||
// ptr, because we align to full blocks of size bytesPerBlock
|
||||
oldSize := endOfTailAddress - ptrAddress
|
||||
if size <= oldSize {
|
||||
return ptr
|
||||
}
|
||||
|
||||
newAlloc := Alloc(size)
|
||||
c.Memcpy(newAlloc, ptr, oldSize)
|
||||
free(ptr)
|
||||
|
||||
return newAlloc
|
||||
}
|
||||
|
||||
func free(ptr unsafe.Pointer) {
|
||||
// TODO: free blocks on request, when the compiler knows they're unused.
|
||||
}
|
||||
|
||||
func GC() uintptr {
|
||||
lock(&gcMutex)
|
||||
freeBytes := gc()
|
||||
unlock(&gcMutex)
|
||||
return freeBytes
|
||||
}
|
||||
|
||||
// runGC performs a garbage collection cycle. It is the internal implementation
|
||||
// of the runtime.GC() function. The difference is that it returns the number of
|
||||
// free bytes in the heap after the GC is finished.
|
||||
func gc() (freeBytes uintptr) {
|
||||
lazyInit()
|
||||
|
||||
if gcDebug {
|
||||
println("running collection cycle...")
|
||||
}
|
||||
|
||||
// Mark phase: mark all reachable objects, recursively.
|
||||
gcMarkReachable()
|
||||
|
||||
finishMark()
|
||||
|
||||
// If we're using threads, resume all other threads before starting the
|
||||
// sweep.
|
||||
gcResumeWorld()
|
||||
|
||||
// Sweep phase: free all non-marked objects and unmark marked objects for
|
||||
// the next collection cycle.
|
||||
freeBytes = sweep()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// markRoots reads all pointers from start to end (exclusive) and if they look
|
||||
// like a heap pointer and are unmarked, marks them and scans that object as
|
||||
// well (recursively). The start and end parameters must be valid pointers and
|
||||
// must be aligned.
|
||||
func markRoots(start, end uintptr) {
|
||||
if start >= end {
|
||||
gcPanic(c.Str("gc: unexpected range to mark"))
|
||||
}
|
||||
// Reduce the end bound to avoid reading too far on platforms where pointer alignment is smaller than pointer size.
|
||||
// If the size of the range is 0, then end will be slightly below start after this.
|
||||
end -= unsafe.Sizeof(end) - unsafe.Alignof(end)
|
||||
|
||||
for addr := start; addr < end; addr += unsafe.Alignof(addr) {
|
||||
root := *(*uintptr)(unsafe.Pointer(addr))
|
||||
markRoot(addr, root)
|
||||
}
|
||||
}
|
||||
|
||||
// startMark starts the marking process on a root and all of its children.
|
||||
func startMark(root uintptr) {
|
||||
var stack [markStackSize]uintptr
|
||||
stack[0] = root
|
||||
gcSetState(root, blockStateMark)
|
||||
stackLen := 1
|
||||
for stackLen > 0 {
|
||||
// Pop a block off of the stack.
|
||||
stackLen--
|
||||
block := stack[stackLen]
|
||||
|
||||
start, end := gcAddressOf(block), gcAddressOf(gcFindNext(block))
|
||||
|
||||
for addr := start; addr != end; addr += unsafe.Alignof(addr) {
|
||||
// Load the word.
|
||||
word := *(*uintptr)(unsafe.Pointer(addr))
|
||||
|
||||
if !isPointer(word) {
|
||||
// Not a heap pointer.
|
||||
continue
|
||||
}
|
||||
|
||||
// Find the corresponding memory block.
|
||||
referencedBlock := blockFromAddr(word)
|
||||
|
||||
if gcStateOf(referencedBlock) == blockStateFree {
|
||||
// The to-be-marked object doesn't actually exist.
|
||||
// This is probably a false positive.
|
||||
continue
|
||||
}
|
||||
|
||||
// Move to the block's head.
|
||||
referencedBlock = gcFindHead(referencedBlock)
|
||||
|
||||
if gcStateOf(referencedBlock) == blockStateMark {
|
||||
// The block has already been marked by something else.
|
||||
continue
|
||||
}
|
||||
|
||||
// Mark block.
|
||||
gcSetState(referencedBlock, blockStateMark)
|
||||
|
||||
if stackLen == len(stack) {
|
||||
// The stack is full.
|
||||
// It is necessary to rescan all marked blocks once we are done.
|
||||
markStackOverflow = true
|
||||
if gcDebug {
|
||||
println("gc stack overflowed")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Push the pointer onto the stack to be scanned later.
|
||||
stack[stackLen] = referencedBlock
|
||||
stackLen++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// finishMark finishes the marking process by processing all stack overflows.
|
||||
func finishMark() {
|
||||
for markStackOverflow {
|
||||
// Re-mark all blocks.
|
||||
markStackOverflow = false
|
||||
for block := uintptr(0); block < endBlock; block++ {
|
||||
if gcStateOf(block) != blockStateMark {
|
||||
// Block is not marked, so we do not need to rescan it.
|
||||
continue
|
||||
}
|
||||
|
||||
// Re-mark the block.
|
||||
startMark(block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// mark a GC root at the address addr.
|
||||
func markRoot(addr, root uintptr) {
|
||||
if isOnHeap(root) {
|
||||
block := blockFromAddr(root)
|
||||
if gcStateOf(block) == blockStateFree {
|
||||
// The to-be-marked object doesn't actually exist.
|
||||
// This could either be a dangling pointer (oops!) but most likely
|
||||
// just a false positive.
|
||||
return
|
||||
}
|
||||
head := gcFindHead(block)
|
||||
|
||||
if gcStateOf(head) != blockStateMark {
|
||||
startMark(head)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sweep goes through all memory and frees unmarked objects.
|
||||
// It returns how many bytes are free in the heap after the sweep.
|
||||
func sweep() (freeBytes uintptr) {
|
||||
freeCurrentObject := false
|
||||
var freed uint64
|
||||
|
||||
for block := uintptr(0); block < endBlock; block++ {
|
||||
switch gcStateOf(block) {
|
||||
case blockStateHead:
|
||||
// Unmarked head. Free it, including all tail blocks following it.
|
||||
gcMarkFree(block)
|
||||
freeCurrentObject = true
|
||||
gcFrees++
|
||||
freed++
|
||||
case blockStateTail:
|
||||
if freeCurrentObject {
|
||||
// This is a tail object following an unmarked head.
|
||||
// Free it now.
|
||||
gcMarkFree(block)
|
||||
freed++
|
||||
}
|
||||
case blockStateMark:
|
||||
// This is a marked object. The next tail blocks must not be freed,
|
||||
// but the mark bit must be removed so the next GC cycle will
|
||||
// collect this object if it is unreferenced then.
|
||||
gcUnmark(block)
|
||||
freeCurrentObject = false
|
||||
case blockStateFree:
|
||||
freeBytes += bytesPerBlock
|
||||
}
|
||||
}
|
||||
gcFreedBlocks += freed
|
||||
freeBytes += uintptr(freed) * bytesPerBlock
|
||||
return
|
||||
}
|
||||
|
||||
// growHeap tries to grow the heap size. It returns true if it succeeds, false
|
||||
// otherwise.
|
||||
func growHeap() bool {
|
||||
// On baremetal, there is no way the heap can be grown.
|
||||
return false
|
||||
}
|
||||
|
||||
func gcMarkReachable() {
|
||||
markRoots(uintptr(getsp()), stackTop)
|
||||
markRoots(globalsStart, globalsEnd)
|
||||
}
|
||||
|
||||
func gcResumeWorld() {
|
||||
// Nothing to do here (single threaded).
|
||||
}
|
||||
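One practical consequence of the conservative design in this file: isPointer only checks whether a word falls inside the heap range, so any stack or global word holding such a value roots an object even when it is stored as a plain integer. An in-package sketch (hypothetical names, not part of the PR):

//go:build baremetal

package tinygogc

// looksLikeAPointer is an ordinary integer global. Globals are scanned
// word-by-word as GC roots, so the heap address stored in it still counts
// as a reference.
var looksLikeAPointer uintptr

func conservativeRetentionExample() {
	p := Alloc(64)
	looksLikeAPointer = uintptr(p) // keeps the object reachable
	GC()                           // ...so it survives this collection
}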
runtime/internal/runtime/tinygogc/mutex.go (new file, 8 lines)
@@ -0,0 +1,8 @@
package tinygogc

// TODO(MeteorsLiu): mutex lock for baremetal GC
type mutex struct{}

func lock(m *mutex) {}

func unlock(m *mutex) {}

runtime/internal/runtime/tinygogc/pc_mock_test.go (new file, 604 lines)
@@ -0,0 +1,604 @@
|
||||
//go:build testGC
|
||||
|
||||
package tinygogc
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
c "github.com/goplus/llgo/runtime/internal/clite"
|
||||
)
|
||||
|
||||
const (
|
||||
// Mock a typical embedded system with 128KB RAM
|
||||
mockHeapSize = 128 * 1024 // 128KB
|
||||
mockGlobalsSize = 4 * 1024 // 4KB for globals
|
||||
mockStackSize = 8 * 1024 // 8KB for stack
|
||||
mockReservedSize = 2048 // 2KB reserved as in real implementation
|
||||
)
|
||||
|
||||
type testObject struct {
|
||||
data [4]uintptr
|
||||
}
|
||||
|
||||
// mockGCEnv provides a controlled root environment for GC testing
|
||||
type mockGCEnv struct {
|
||||
memory []byte
|
||||
heapStart uintptr
|
||||
heapEnd uintptr
|
||||
globalsStart uintptr
|
||||
globalsEnd uintptr
|
||||
stackStart uintptr
|
||||
stackEnd uintptr
|
||||
// Controlled root sets for testing
|
||||
rootObjects []unsafe.Pointer
|
||||
// Original GC state to restore
|
||||
originalHeapStart uintptr
|
||||
originalHeapEnd uintptr
|
||||
originalGlobalsStart uintptr
|
||||
originalGlobalsEnd uintptr
|
||||
originalStackTop uintptr
|
||||
originalEndBlock uintptr
|
||||
originalMetadataStart unsafe.Pointer
|
||||
originalNextAlloc uintptr
|
||||
originalIsGCInit bool
|
||||
// Mock mode flag
|
||||
mockMode bool
|
||||
}
|
||||
|
||||
// createMockGCEnv creates a completely isolated GC environment
|
||||
func createMockGCEnv() *mockGCEnv {
|
||||
totalMemory := mockHeapSize + mockGlobalsSize + mockStackSize
|
||||
memory := make([]byte, totalMemory)
|
||||
baseAddr := uintptr(unsafe.Pointer(&memory[0]))
|
||||
|
||||
env := &mockGCEnv{
|
||||
memory: memory,
|
||||
globalsStart: baseAddr,
|
||||
globalsEnd: baseAddr + mockGlobalsSize,
|
||||
heapStart: baseAddr + mockGlobalsSize + mockReservedSize,
|
||||
heapEnd: baseAddr + mockGlobalsSize + mockHeapSize,
|
||||
stackStart: baseAddr + mockGlobalsSize + mockHeapSize,
|
||||
stackEnd: baseAddr + uintptr(totalMemory),
|
||||
rootObjects: make([]unsafe.Pointer, 0),
|
||||
mockMode: false,
|
||||
}
|
||||
|
||||
return env
|
||||
}
|
||||
|
||||
// setupMockGC initializes the GC with mock memory layout using initGC's logic
|
||||
func (env *mockGCEnv) setupMockGC() {
|
||||
// Save original GC state
|
||||
env.originalHeapStart = heapStart
|
||||
env.originalHeapEnd = heapEnd
|
||||
env.originalGlobalsStart = globalsStart
|
||||
env.originalGlobalsEnd = globalsEnd
|
||||
env.originalStackTop = stackTop
|
||||
env.originalEndBlock = endBlock
|
||||
env.originalMetadataStart = metadataStart
|
||||
env.originalNextAlloc = nextAlloc
|
||||
env.originalIsGCInit = isGCInit
|
||||
|
||||
// Set currentStack for getsp()
|
||||
currentStack = env.stackStart
|
||||
|
||||
// Apply initGC's logic with our mock memory layout
|
||||
// This is the same logic as initGC() but with our mock addresses
|
||||
heapStart = env.heapStart + 2048 // reserve 2K blocks like initGC does
|
||||
heapEnd = env.heapEnd
|
||||
globalsStart = env.globalsStart
|
||||
globalsEnd = env.globalsEnd
|
||||
stackTop = env.stackEnd
|
||||
|
||||
totalSize := heapEnd - heapStart
|
||||
metadataSize := (totalSize + blocksPerStateByte*bytesPerBlock) / (1 + blocksPerStateByte*bytesPerBlock)
|
||||
metadataStart = unsafe.Pointer(heapEnd - metadataSize)
|
||||
endBlock = (uintptr(metadataStart) - heapStart) / bytesPerBlock
|
||||
|
||||
// Clear metadata using memset like initGC does
|
||||
c.Memset(metadataStart, 0, metadataSize)
|
||||
|
||||
// Reset allocator state and all GC statistics for clean test environment
|
||||
nextAlloc = 0
|
||||
isGCInit = true
|
||||
|
||||
// Reset all GC statistics to start from clean state
|
||||
gcTotalAlloc = 0
|
||||
gcTotalBlocks = 0
|
||||
gcMallocs = 0
|
||||
gcFrees = 0
|
||||
gcFreedBlocks = 0
|
||||
markStackOverflow = false
|
||||
}
|
||||
|
||||
// restoreOriginalGC restores the original GC state
|
||||
func (env *mockGCEnv) restoreOriginalGC() {
|
||||
heapStart = env.originalHeapStart
|
||||
heapEnd = env.originalHeapEnd
|
||||
globalsStart = env.originalGlobalsStart
|
||||
globalsEnd = env.originalGlobalsEnd
|
||||
stackTop = env.originalStackTop
|
||||
endBlock = env.originalEndBlock
|
||||
metadataStart = env.originalMetadataStart
|
||||
nextAlloc = env.originalNextAlloc
|
||||
isGCInit = false
|
||||
}
|
||||
|
||||
// enableMockMode enables mock root scanning mode
|
||||
func (env *mockGCEnv) enableMockMode() {
|
||||
env.mockMode = true
|
||||
}
|
||||
|
||||
// disableMockMode disables mock root scanning mode
|
||||
func (env *mockGCEnv) disableMockMode() {
|
||||
env.mockMode = false
|
||||
}
|
||||
|
||||
// addRoot adds an object to the controlled root set
|
||||
func (env *mockGCEnv) addRoot(ptr unsafe.Pointer) {
|
||||
env.rootObjects = append(env.rootObjects, ptr)
|
||||
}
|
||||
|
||||
// clearRoots removes all objects from the controlled root set
|
||||
func (env *mockGCEnv) clearRoots() {
|
||||
env.rootObjects = env.rootObjects[:0]
|
||||
}
|
||||
|
||||
// mockMarkReachable replaces gcMarkReachable when in mock mode
|
||||
func (env *mockGCEnv) mockMarkReachable() {
|
||||
if !env.mockMode {
|
||||
// Use original logic
|
||||
markRoots(uintptr(getsp()), stackTop)
|
||||
markRoots(globalsStart, globalsEnd)
|
||||
return
|
||||
}
|
||||
|
||||
// Mock mode: only scan our controlled roots
|
||||
for _, root := range env.rootObjects {
|
||||
addr := uintptr(root)
|
||||
markRoot(addr, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// runMockGC runs standard GC but with controlled root scanning
|
||||
func (env *mockGCEnv) runMockGC() uintptr {
|
||||
lock(&gcMutex)
|
||||
defer unlock(&gcMutex)
|
||||
|
||||
lazyInit()
|
||||
|
||||
if gcDebug {
|
||||
println("running mock collection cycle...")
|
||||
}
|
||||
|
||||
// Mark phase: use our mock root scanning
|
||||
env.mockMarkReachable()
|
||||
finishMark()
|
||||
|
||||
// Resume world (no-op in single threaded)
|
||||
gcResumeWorld()
|
||||
|
||||
// Sweep phase: use standard sweep logic
|
||||
return sweep()
|
||||
}
|
||||
|
||||
// createTestObjects creates a network of objects for testing reachability
|
||||
func createTestObjects(env *mockGCEnv) []*testObject {
|
||||
// Allocate several test objects
|
||||
objects := make([]*testObject, 0, 10)
|
||||
|
||||
// Dependencies Graph
|
||||
// root1 -> child1 -> grandchild1 -> child2
|
||||
// root1 -> child2 -> grandchild1
|
||||
|
||||
// Create root objects (reachable from stack/globals)
|
||||
root1 := (*testObject)(Alloc(unsafe.Sizeof(testObject{})))
|
||||
root2 := (*testObject)(Alloc(unsafe.Sizeof(testObject{})))
|
||||
objects = append(objects, root1, root2)
|
||||
|
||||
// Create objects reachable from root1
|
||||
child1 := (*testObject)(Alloc(unsafe.Sizeof(testObject{})))
|
||||
child2 := (*testObject)(Alloc(unsafe.Sizeof(testObject{})))
|
||||
root1.data[0] = uintptr(unsafe.Pointer(child1))
|
||||
root1.data[1] = uintptr(unsafe.Pointer(child2))
|
||||
objects = append(objects, child1, child2)
|
||||
|
||||
// Create objects reachable from child1
|
||||
grandchild1 := (*testObject)(Alloc(unsafe.Sizeof(testObject{})))
|
||||
child1.data[0] = uintptr(unsafe.Pointer(grandchild1))
|
||||
objects = append(objects, grandchild1)
|
||||
|
||||
// Create circular reference between child2 and grandchild1
|
||||
child2.data[0] = uintptr(unsafe.Pointer(grandchild1))
|
||||
grandchild1.data[0] = uintptr(unsafe.Pointer(child2))
|
||||
|
||||
// Create unreachable objects (garbage)
|
||||
garbage1 := (*testObject)(Alloc(unsafe.Sizeof(testObject{})))
|
||||
garbage2 := (*testObject)(Alloc(unsafe.Sizeof(testObject{})))
|
||||
// Create circular reference in garbage
|
||||
garbage1.data[0] = uintptr(unsafe.Pointer(garbage2))
|
||||
garbage2.data[0] = uintptr(unsafe.Pointer(garbage1))
|
||||
objects = append(objects, garbage1, garbage2)
|
||||
|
||||
return objects
|
||||
}
|
||||
|
||||
func TestMockGCBasicAllocation(t *testing.T) {
|
||||
env := createMockGCEnv()
|
||||
env.setupMockGC()
|
||||
defer env.restoreOriginalGC()
|
||||
|
||||
// Test basic allocation
|
||||
ptr1 := Alloc(32)
|
||||
if ptr1 == nil {
|
||||
t.Fatal("Failed to allocate 32 bytes")
|
||||
}
|
||||
|
||||
ptr2 := Alloc(64)
|
||||
if ptr2 == nil {
|
||||
t.Fatal("Failed to allocate 64 bytes")
|
||||
}
|
||||
|
||||
// Verify pointers are within heap bounds
|
||||
addr1 := uintptr(ptr1)
|
||||
addr2 := uintptr(ptr2)
|
||||
|
||||
if addr1 < heapStart || addr1 >= uintptr(metadataStart) {
|
||||
t.Errorf("ptr1 %x not within heap bounds [%x, %x)", addr1, heapStart, uintptr(metadataStart))
|
||||
}
|
||||
|
||||
if addr2 < heapStart || addr2 >= uintptr(metadataStart) {
|
||||
t.Errorf("ptr2 %x not within heap bounds [%x, %x)", addr2, heapStart, uintptr(metadataStart))
|
||||
}
|
||||
|
||||
t.Logf("Allocated ptr1 at %x, ptr2 at %x", addr1, addr2)
|
||||
t.Logf("Heap bounds: [%x, %x)", heapStart, uintptr(metadataStart))
|
||||
}
|
||||
|
||||
func TestMockGCReachabilityAndSweep(t *testing.T) {
|
||||
env := createMockGCEnv()
|
||||
env.setupMockGC()
|
||||
defer env.restoreOriginalGC()
|
||||
|
||||
// Track initial stats
|
||||
initialMallocs := gcMallocs
|
||||
initialFrees := gcFrees
|
||||
|
||||
// Create test object network
|
||||
objects := createTestObjects(env)
|
||||
|
||||
// Add first 2 objects as roots using mock control
|
||||
env.enableMockMode()
|
||||
env.addRoot(unsafe.Pointer(objects[0])) // root1
|
||||
env.addRoot(unsafe.Pointer(objects[1])) // root2
|
||||
|
||||
t.Logf("Created %d objects, 2 are roots", len(objects))
|
||||
t.Logf("Mallocs: %d", gcMallocs-initialMallocs)
|
||||
|
||||
// Verify all objects are initially allocated
|
||||
for i, obj := range objects {
|
||||
addr := uintptr(unsafe.Pointer(obj))
|
||||
block := blockFromAddr(addr)
|
||||
state := gcStateOf(block)
|
||||
if state != blockStateHead {
|
||||
t.Errorf("Object %d at %x has state %d, expected %d (HEAD)", i, addr, state, blockStateHead)
|
||||
}
|
||||
}
|
||||
|
||||
// Perform GC with controlled root scanning
|
||||
freedBytes := env.runMockGC()
|
||||
t.Logf("Freed %d bytes during GC", freedBytes)
|
||||
t.Logf("Frees: %d (delta: %d)", gcFrees, gcFrees-initialFrees)
|
||||
|
||||
// Verify reachable objects are still allocated
|
||||
reachableObjects := []unsafe.Pointer{
|
||||
unsafe.Pointer(objects[0]), // root1
|
||||
unsafe.Pointer(objects[1]), // root2
|
||||
unsafe.Pointer(objects[2]), // child1 (reachable from root1)
|
||||
unsafe.Pointer(objects[3]), // child2 (reachable from root1)
|
||||
unsafe.Pointer(objects[4]), // grandchild1 (reachable from child1, child2)
|
||||
}
|
||||
|
||||
for i, obj := range reachableObjects {
|
||||
addr := uintptr(obj)
|
||||
block := blockFromAddr(addr)
|
||||
state := gcStateOf(block)
|
||||
if state != blockStateHead {
|
||||
t.Errorf("Reachable object %d at %x has state %d, expected %d (HEAD)", i, addr, state, blockStateHead)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify unreachable objects are freed
|
||||
unreachableObjects := []unsafe.Pointer{
|
||||
unsafe.Pointer(objects[5]), // garbage1
|
||||
unsafe.Pointer(objects[6]), // garbage2
|
||||
}
|
||||
|
||||
for i, obj := range unreachableObjects {
|
||||
addr := uintptr(obj)
|
||||
block := blockFromAddr(addr)
|
||||
state := gcStateOf(block)
|
||||
if state != blockStateFree {
|
||||
t.Errorf("Unreachable object %d at %x has state %d, expected %d (FREE)", i, addr, state, blockStateFree)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify some memory was actually freed
|
||||
if freedBytes == 0 {
|
||||
t.Error("Expected some memory to be freed, but freed 0 bytes")
|
||||
}
|
||||
|
||||
if gcFrees == initialFrees {
|
||||
t.Error("Expected some objects to be freed, but free count didn't change")
|
||||
}
|
||||
|
||||
// Clear refs to make grandchild1 unreachable
|
||||
objects[2].data[0] = 0 // child1 -> grandchild1
|
||||
objects[3].data[0] = 0 // child2 -> grandchild1
|
||||
|
||||
// Run GC again with same roots
|
||||
freedBytes = env.runMockGC()
|
||||
|
||||
// child2 should still be reachable (through root1)
|
||||
blockAddr := blockFromAddr(uintptr(unsafe.Pointer(objects[3])))
|
||||
state := gcStateOf(blockAddr)
|
||||
if state != blockStateHead {
|
||||
t.Errorf("Object child2 at %x has state %d, expected %d (HEAD)", blockAddr, state, blockStateHead)
|
||||
}
|
||||
|
||||
// grandchild1 should now be unreachable and freed
|
||||
blockAddr = blockFromAddr(uintptr(unsafe.Pointer(objects[4])))
|
||||
state = gcStateOf(blockAddr)
|
||||
if state != blockStateFree {
|
||||
t.Errorf("Object grandchild1 at %x has state %d, expected %d (FREE)", blockAddr, state, blockStateFree)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMockGCMemoryPressure(t *testing.T) {
|
||||
env := createMockGCEnv()
|
||||
env.setupMockGC()
|
||||
defer env.restoreOriginalGC()
|
||||
|
||||
// Calculate available heap space
|
||||
heapSize := uintptr(metadataStart) - heapStart
|
||||
blockSize := bytesPerBlock
|
||||
maxBlocks := heapSize / blockSize
|
||||
|
||||
t.Logf("Heap size: %d bytes, Block size: %d bytes, Max blocks: %d",
|
||||
heapSize, blockSize, maxBlocks)
|
||||
|
||||
// Allocate until we trigger GC
|
||||
var allocations []unsafe.Pointer
|
||||
allocSize := uintptr(32) // Small allocations
|
||||
|
||||
// Allocate about 80% of heap to trigger GC pressure
|
||||
targetAllocations := int(maxBlocks * 4 / 5) // 80% capacity
|
||||
|
||||
for i := 0; i < targetAllocations; i++ {
|
||||
ptr := Alloc(allocSize)
|
||||
if ptr == nil {
|
||||
t.Fatalf("Failed to allocate at iteration %d", i)
|
||||
}
|
||||
allocations = append(allocations, ptr)
|
||||
}
|
||||
|
||||
initialMallocs := gcMallocs
|
||||
t.Logf("Allocated %d objects (%d mallocs total)", len(allocations), initialMallocs)
|
||||
|
||||
// Enable mock mode and keep only half the allocations as roots
|
||||
env.enableMockMode()
|
||||
keepCount := len(allocations) / 2
|
||||
for i := 0; i < keepCount; i++ {
|
||||
env.addRoot(allocations[i])
|
||||
}
|
||||
|
||||
t.Logf("Keeping %d objects as roots, %d should be freed", keepCount, len(allocations)-keepCount)
|
||||
|
||||
// Force GC with controlled roots
|
||||
freeBytes := env.runMockGC()
|
||||
|
||||
t.Logf("GC freed %d bytes", freeBytes)
|
||||
t.Logf("Objects freed: %d", gcFrees)
|
||||
|
||||
// Try to allocate more after GC
|
||||
for i := 0; i < 10; i++ {
|
||||
ptr := Alloc(allocSize)
|
||||
if ptr == nil {
|
||||
t.Fatalf("Failed to allocate after GC at iteration %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
t.Log("Successfully allocated more objects after GC")
|
||||
}
|
||||
|
||||
func TestMockGCStats(t *testing.T) {
|
||||
env := createMockGCEnv()
|
||||
env.setupMockGC()
|
||||
defer env.restoreOriginalGC()
|
||||
|
||||
// Get initial stats
|
||||
initialStats := ReadGCStats()
|
||||
t.Logf("Initial stats - Mallocs: %d, Frees: %d, TotalAlloc: %d, Alloc: %d",
|
||||
initialStats.Mallocs, initialStats.Frees, initialStats.TotalAlloc, initialStats.Alloc)
|
||||
|
||||
// Verify basic system stats
|
||||
expectedSys := uint64(env.heapEnd - env.heapStart - 2048)
|
||||
if initialStats.Sys != expectedSys {
|
||||
t.Errorf("Expected Sys %d, got %d", expectedSys, initialStats.Sys)
|
||||
}
|
||||
|
||||
expectedGCSys := uint64(env.heapEnd - uintptr(metadataStart))
|
||||
if initialStats.GCSys != expectedGCSys {
|
||||
t.Errorf("Expected GCSys %d, got %d", expectedGCSys, initialStats.GCSys)
|
||||
}
|
||||
|
||||
// Allocate some objects
|
||||
var allocations []unsafe.Pointer
|
||||
allocSize := uintptr(64)
|
||||
numAllocs := 10
|
||||
|
||||
for i := 0; i < numAllocs; i++ {
|
||||
ptr := Alloc(allocSize)
|
||||
if ptr == nil {
|
||||
t.Fatalf("Failed to allocate at iteration %d", i)
|
||||
}
|
||||
allocations = append(allocations, ptr)
|
||||
}
|
||||
|
||||
// Check stats after allocation
|
||||
afterAllocStats := ReadGCStats()
|
||||
t.Logf("After allocation - Mallocs: %d, Frees: %d, TotalAlloc: %d, Alloc: %d",
|
||||
afterAllocStats.Mallocs, afterAllocStats.Frees, afterAllocStats.TotalAlloc, afterAllocStats.Alloc)
|
||||
|
||||
// Verify allocation stats increased
|
||||
if afterAllocStats.Mallocs <= initialStats.Mallocs {
|
||||
t.Errorf("Expected Mallocs to increase from %d, got %d", initialStats.Mallocs, afterAllocStats.Mallocs)
|
||||
}
|
||||
|
||||
if afterAllocStats.TotalAlloc <= initialStats.TotalAlloc {
|
||||
t.Errorf("Expected TotalAlloc to increase from %d, got %d", initialStats.TotalAlloc, afterAllocStats.TotalAlloc)
|
||||
}
|
||||
|
||||
if afterAllocStats.Alloc <= initialStats.Alloc {
|
||||
t.Errorf("Expected Alloc to increase from %d, got %d", initialStats.Alloc, afterAllocStats.Alloc)
|
||||
}
|
||||
|
||||
// Verify Alloc and HeapAlloc are the same
|
||||
if afterAllocStats.Alloc != afterAllocStats.HeapAlloc {
|
||||
t.Errorf("Expected Alloc (%d) to equal HeapAlloc (%d)", afterAllocStats.Alloc, afterAllocStats.HeapAlloc)
|
||||
}
|
||||
|
||||
// Perform GC with controlled roots - keep only half the allocations
|
||||
env.enableMockMode()
|
||||
keepCount := len(allocations) / 2
|
||||
for i := 0; i < keepCount; i++ {
|
||||
env.addRoot(allocations[i])
|
||||
}
|
||||
|
||||
freedBytes := env.runMockGC()
|
||||
t.Logf("GC freed %d bytes", freedBytes)
|
||||
|
||||
// Check stats after GC
|
||||
afterGCStats := ReadGCStats()
|
||||
t.Logf("After GC - Mallocs: %d, Frees: %d, TotalAlloc: %d, Alloc: %d",
|
||||
afterGCStats.Mallocs, afterGCStats.Frees, afterGCStats.TotalAlloc, afterGCStats.Alloc)
|
||||
|
||||
// Verify GC stats
|
||||
if afterGCStats.Frees <= afterAllocStats.Frees {
|
||||
t.Errorf("Expected Frees to increase from %d, got %d", afterAllocStats.Frees, afterGCStats.Frees)
|
||||
}
|
||||
|
||||
// TotalAlloc should not decrease (cumulative)
|
||||
if afterGCStats.TotalAlloc != afterAllocStats.TotalAlloc {
|
||||
t.Errorf("Expected TotalAlloc to remain %d after GC, got %d", afterAllocStats.TotalAlloc, afterGCStats.TotalAlloc)
|
||||
}
|
||||
|
||||
// Alloc should decrease (freed objects)
|
||||
if afterGCStats.Alloc >= afterAllocStats.Alloc {
|
||||
t.Errorf("Expected Alloc to decrease from %d after GC, got %d", afterAllocStats.Alloc, afterGCStats.Alloc)
|
||||
}
|
||||
|
||||
// Verify heap statistics consistency
|
||||
if afterGCStats.HeapSys != afterGCStats.HeapInuse+afterGCStats.HeapIdle {
|
||||
t.Errorf("Expected HeapSys (%d) to equal HeapInuse (%d) + HeapIdle (%d)",
|
||||
afterGCStats.HeapSys, afterGCStats.HeapInuse, afterGCStats.HeapIdle)
|
||||
}
|
||||
|
||||
// Verify live objects calculation
|
||||
expectedLiveObjects := afterGCStats.Mallocs - afterGCStats.Frees
|
||||
t.Logf("Live objects: %d (Mallocs: %d - Frees: %d)", expectedLiveObjects, afterGCStats.Mallocs, afterGCStats.Frees)
|
||||
|
||||
// The number of live objects should be reasonable (we kept half the allocations plus some overhead)
|
||||
if expectedLiveObjects < uint64(keepCount) {
|
||||
t.Errorf("Expected at least %d live objects, got %d", keepCount, expectedLiveObjects)
|
||||
}
|
||||
|
||||
// Test stack statistics
|
||||
if afterGCStats.StackInuse > afterGCStats.StackSys {
|
||||
t.Errorf("StackInuse (%d) should not exceed StackSys (%d)", afterGCStats.StackInuse, afterGCStats.StackSys)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMockGCCircularReferences(t *testing.T) {
|
||||
env := createMockGCEnv()
|
||||
env.setupMockGC()
|
||||
defer env.restoreOriginalGC()
|
||||
|
||||
type Node struct {
|
||||
data [3]uintptr
|
||||
next uintptr
|
||||
}
|
||||
|
||||
// Create a circular linked list
|
||||
nodes := make([]*Node, 5)
|
||||
for i := range nodes {
|
||||
nodes[i] = (*Node)(Alloc(unsafe.Sizeof(Node{})))
|
||||
nodes[i].data[0] = uintptr(i) // Store index as data
|
||||
}
|
||||
|
||||
// Link them in a circle
|
||||
for i := range nodes {
|
||||
nextIdx := (i + 1) % len(nodes)
|
||||
nodes[i].next = uintptr(unsafe.Pointer(nodes[nextIdx]))
|
||||
}
|
||||
|
||||
t.Logf("Created circular list of %d nodes", len(nodes))
|
||||
|
||||
// Initially all should be allocated
|
||||
for i, node := range nodes {
|
||||
addr := uintptr(unsafe.Pointer(node))
|
||||
block := blockFromAddr(addr)
|
||||
state := gcStateOf(block)
|
||||
if state != blockStateHead {
|
||||
t.Errorf("Node %d at %x has state %d, expected %d", i, addr, state, blockStateHead)
|
||||
}
|
||||
}
|
||||
|
||||
// Test 1: With root references - objects should NOT be freed
|
||||
env.enableMockMode()
|
||||
// Add the first node as root (keeps entire circle reachable)
|
||||
env.addRoot(unsafe.Pointer(nodes[0]))
|
||||
|
||||
freeBytes := env.runMockGC()
|
||||
t.Logf("GC with root reference freed %d bytes", freeBytes)
|
||||
|
||||
// All nodes should still be allocated since they're reachable through the root
|
||||
for i, node := range nodes {
|
||||
addr := uintptr(unsafe.Pointer(node))
|
||||
block := blockFromAddr(addr)
|
||||
state := gcStateOf(block)
|
||||
if state != blockStateHead {
|
||||
t.Errorf("Node %d at %x should still be allocated, but has state %d", i, addr, state)
|
||||
}
|
||||
}
|
||||
|
||||
// Test 2: Without root references - all circular objects should be freed
|
||||
env.clearRoots() // Remove all root references
|
||||
|
||||
freeBytes = env.runMockGC()
|
||||
t.Logf("GC without roots freed %d bytes", freeBytes)
|
||||
|
||||
// All nodes should now be freed since they're not reachable from any roots
|
||||
expectedFreed := uintptr(len(nodes)) * ((unsafe.Sizeof(Node{}) + bytesPerBlock - 1) / bytesPerBlock) * bytesPerBlock
|
||||
|
||||
if freeBytes < expectedFreed {
|
||||
t.Errorf("Expected at least %d bytes freed, got %d", expectedFreed, freeBytes)
|
||||
}
|
||||
|
||||
// Verify all nodes are actually freed
|
||||
for i, node := range nodes {
|
||||
addr := uintptr(unsafe.Pointer(node))
|
||||
block := blockFromAddr(addr)
|
||||
state := gcStateOf(block)
|
||||
if state != blockStateFree {
|
||||
t.Errorf("Node %d at %x should be freed, but has state %d", i, addr, state)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify we can allocate new objects in the freed space
|
||||
newPtr := Alloc(unsafe.Sizeof(Node{}))
|
||||
if newPtr == nil {
|
||||
t.Error("Failed to allocate after freeing circular references")
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
//go:build !nogc
//go:build !nogc && !baremetal

/*
 * Copyright (c) 2025 The GoPlus Authors (goplus.org). All rights reserved.

@@ -1,4 +1,4 @@
//go:build nogc
//go:build nogc || baremetal

/*
 * Copyright (c) 2025 The GoPlus Authors (goplus.org). All rights reserved.

@@ -1,5 +1,4 @@
//go:build !nogc
// +build !nogc
//go:build !nogc && !baremetal

/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.

runtime/internal/runtime/z_gc_baremetal.go (new file, 35 lines)
@@ -0,0 +1,35 @@
//go:build !nogc && baremetal

/*
 * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/runtime/internal/runtime/tinygogc"
)

// AllocU allocates uninitialized memory.
func AllocU(size uintptr) unsafe.Pointer {
	return tinygogc.Alloc(size)
}

// AllocZ allocates zero-initialized memory.
func AllocZ(size uintptr) unsafe.Pointer {
	return tinygogc.Alloc(size)
}

@@ -1,6 +1,4 @@
__stack = ORIGIN(dram_seg) + LENGTH(dram_seg);
__MIN_STACK_SIZE = 0x1000;
_stack_top = __stack;
_heapEnd = ORIGIN(dram_seg) + LENGTH(dram_seg);

/* Default entry point */
ENTRY(_start)

@@ -94,6 +92,12 @@ SECTIONS
_iram_end = .;
} > iram_seg

.stack (NOLOAD) :
{
. += 16K;
__stack = .;
} > dram_seg

/**
* This section is required to skip .iram0.text area because iram0_0_seg and
* dram0_0_seg reflect the same address space on different buses.

@@ -148,7 +152,7 @@ SECTIONS
} > dram_seg

/* Check if data + heap + stack exceeds RAM limit */
ASSERT(_end <= __stack - __MIN_STACK_SIZE, "region DRAM overflowed by .data and .bss sections")
ASSERT(_end <= _heapEnd, "region DRAM overflowed by .data and .bss sections")

/* Stabs debugging sections. */
.stab 0 : { *(.stab) }

@@ -193,3 +197,8 @@ SECTIONS
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
/DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) }
}

_globals_start = _data_start;
_globals_end = _end;
_heapStart = _end;
_stack_top = __stack;

@@ -1,5 +1,4 @@
__stack = ORIGIN(dram_seg) + LENGTH(dram_seg);
__MIN_STACK_SIZE = 0x2000;
_heapEnd = ORIGIN(dram_seg) + LENGTH(dram_seg);

ENTRY(_start)
SECTIONS

@@ -26,6 +25,14 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));

.stack (NOLOAD) :
{
_stack_end = .;
. = ALIGN(16);
. += 16K;
__stack = .;
}

.rodata :
{

@@ -116,7 +123,7 @@ SECTIONS
. = DATA_SEGMENT_END (.);

/* Check if data + heap + stack exceeds RAM limit */
ASSERT(. <= __stack - __MIN_STACK_SIZE, "region DRAM overflowed by .data and .bss sections")
ASSERT(. <= _heapEnd, "region DRAM overflowed by .data and .bss sections")

/* Stabs debugging sections. */
.stab 0 : { *(.stab) }

@@ -165,4 +172,7 @@ SECTIONS

_sbss = __bss_start;
_ebss = _end;

_globals_start = _data_start;
_globals_end = _end;
_heapStart = _end;
_stack_top = __stack;

@@ -19,8 +19,8 @@ MEMORY
/* 64k at the end of DRAM, after ROM bootloader stack
* or entire DRAM (for QEMU only)
*/
dram_seg (RW) : org = 0x3FFF0000 ,
len = 0x10000
dram_seg (RW) : org = 0x3ffae000 ,
len = 0x52000
}

INCLUDE "targets/esp32.app.elf.ld";