fix: bdwgc.init() causing archive mode build failure
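The fix drops the compiler-emitted call to initGC (and the exported initGC shims) and lets the collector initialize itself lazily on first use, so an archive-mode build no longer depends on a generated init hook. A minimal sketch of the pattern, using the tinygogc names that appear in this diff (the actual heap layout and allocation logic are elided):

package tinygogc

import "unsafe"

var isGCInit bool // set once the heap metadata has been laid out

func initGC() {
	// compute the heap bounds and clear the GC metadata (elided in this sketch)
}

// lazyInit runs initGC exactly once, on the first allocation or collection.
func lazyInit() {
	if !isGCInit {
		initGC()
		isGCInit = true
	}
}

// Alloc is called for every allocation and guarantees the GC is initialized.
func Alloc(size uintptr) unsafe.Pointer {
	lazyInit()
	// ... find and return a free block (elided) ...
	return nil
}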
@@ -997,10 +997,6 @@ define weak void @runtime.init() {
   ret void
 }
 
-define weak void @initGC() {
-  ret void
-}
-
 ; TODO(lijie): workaround for syscall patch
 define weak void @"syscall.init"() {
   ret void
@@ -1012,7 +1008,6 @@ define weak void @"syscall.init"() {
 _llgo_0:
   store i32 %%0, ptr @__llgo_argc, align 4
   store ptr %%1, ptr @__llgo_argv, align 8
-  call void @initGC()
   %s
   %s
   %s
@@ -26,11 +26,6 @@ const (
 	LLGoPackage = "link: $(pkg-config --libs bdw-gc); -lgc"
 )
 
-//export initGC
-func initGC() {
-	Init()
-}
-
 // -----------------------------------------------------------------------------
 
 //go:linkname Init C.GC_init
@@ -1,9 +1,3 @@
 package tinygogc
 
-import "github.com/goplus/llgo/runtime/internal/runtime"
-
-const LLGoPackage = "noinit"
-
-func GC() {
-	runtime.GC()
-}
+const LLGoPackage = "link: --wrap=malloc --wrap=realloc --wrap=calloc"
@@ -16,29 +16,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package runtime
+package tinygogc
 
 import (
 	"unsafe"
 	_ "unsafe"
-
-	c "github.com/goplus/llgo/runtime/internal/clite"
-	"github.com/goplus/llgo/runtime/internal/runtime/tinygogc/memory"
 )
 
 const gcDebug = false
 const needsStaticHeap = true
 
-// Some globals + constants for the entire GC.
-
-const (
-	wordsPerBlock      = 4 // number of pointers in an allocated block
-	bytesPerBlock      = wordsPerBlock * unsafe.Sizeof(memory.HeapStart)
-	stateBits          = 2 // how many bits a block state takes (see blockState type)
-	blocksPerStateByte = 8 / stateBits
-	markStackSize      = 8 * unsafe.Sizeof((*int)(nil)) // number of to-be-marked blocks to queue before forcing a rescan
-)
-
 // Provide some abc.Straction over heap blocks.
 
 // blockState stores the four states in which a block can be. It is two bits in
@@ -52,17 +39,52 @@ const (
 	blockStateMask uint8 = 3 // 11
 )
 
+// The byte value of a block where every block is a 'tail' block.
+const blockStateByteAllTails = 0 |
+	uint8(blockStateTail<<(stateBits*3)) |
+	uint8(blockStateTail<<(stateBits*2)) |
+	uint8(blockStateTail<<(stateBits*1)) |
+	uint8(blockStateTail<<(stateBits*0))
 
 //go:linkname getsp llgo.stackSave
 func getsp() unsafe.Pointer
 
-func printlnAndPanic(c string) {
-	println(c)
-	panic("")
-}
+// when executing initGC(), we must ensure there's no any allocations.
+// use linking here to avoid import clite
+//
+//go:linkname memset C.memset
+func memset(unsafe.Pointer, int, uintptr) unsafe.Pointer
+
+//go:linkname memcpy C.memcpy
+func memcpy(unsafe.Pointer, unsafe.Pointer, uintptr)
+
+//go:linkname _heapStart _heapStart
+var _heapStart [0]byte
+
+//go:linkname _heapEnd _heapEnd
+var _heapEnd [0]byte
+
+//go:linkname _stackStart _stack_top
+var _stackStart [0]byte
+
+//go:linkname _globals_start _globals_start
+var _globals_start [0]byte
+
+//go:linkname _globals_end _globals_end
+var _globals_end [0]byte
+
+// since we don't have an init() function, these should be initalized by initHeap(), which is called by <main> entry
 var (
+	heapStart     uintptr        // start address of heap area
+	heapEnd       uintptr        // end address of heap area
+	globalsStart  uintptr        // start address of global variable area
+	globalsEnd    uintptr        // end address of global variable area
+	stackTop      uintptr        // the top of stack
+	endBlock      uintptr        // GC end block index
+	metadataStart unsafe.Pointer // start address of GC metadata
+	isGCInit      bool
+
 	nextAlloc     uintptr // the next block that should be tried by the allocator
-	endBlock      uintptr // the block just past the end of the available space
 	gcTotalAlloc  uint64  // total number of bytes allocated
 	gcTotalBlocks uint64  // total number of allocated blocks
 	gcMallocs     uint64  // total number of allocations
@@ -77,24 +99,61 @@ var (
 	zeroSizedAlloc uint8
 )
 
-// blockState stores the four states in which a block can be. It is two bits in
-// size.
-type blockState uint8
-
-// The byte value of a block where every block is a 'tail' block.
-const blockStateByteAllTails = 0 |
-	uint8(blockStateTail<<(stateBits*3)) |
-	uint8(blockStateTail<<(stateBits*2)) |
-	uint8(blockStateTail<<(stateBits*1)) |
-	uint8(blockStateTail<<(stateBits*0))
+// Some globals + constants for the entire GC.
+
+const (
+	wordsPerBlock      = 4 // number of pointers in an allocated block
+	bytesPerBlock      = wordsPerBlock * unsafe.Sizeof(heapStart)
+	stateBits          = 2 // how many bits a block state takes (see blockState type)
+	blocksPerStateByte = 8 / stateBits
+	markStackSize      = 8 * unsafe.Sizeof((*int)(nil)) // number of to-be-marked blocks to queue before forcing a rescan
+)
+
+//export __wrap_malloc
+func __wrap_malloc(size uintptr) unsafe.Pointer {
+	return Alloc(size)
+}
+
+//export __wrap_calloc
+func __wrap_calloc(size uintptr) unsafe.Pointer {
+	return Alloc(size)
+}
+
+//export __wrap_realloc
+func __wrap_realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
+	return Realloc(ptr, size)
+}
+
+// this function MUST be initalized first, which means it's required to be initalized before runtime
+func initGC() {
+	// reserve 2K blocks for libc internal malloc, we cannot wrap that function
+	heapStart = uintptr(unsafe.Pointer(&_heapStart)) + 2048
+	heapEnd = uintptr(unsafe.Pointer(&_heapEnd))
+	globalsStart = uintptr(unsafe.Pointer(&_globals_start))
+	globalsEnd = uintptr(unsafe.Pointer(&_globals_end))
+	totalSize := heapEnd - heapStart
+	metadataSize := (totalSize + blocksPerStateByte*bytesPerBlock) / (1 + blocksPerStateByte*bytesPerBlock)
+	metadataStart = unsafe.Pointer(heapEnd - metadataSize)
+	endBlock = (uintptr(metadataStart) - heapStart) / bytesPerBlock
+	stackTop = uintptr(unsafe.Pointer(&_stackStart))
+
+	memset(metadataStart, 0, metadataSize)
+}
+
+func lazyInit() {
+	if !isGCInit {
+		initGC()
+		isGCInit = true
+	}
+}
 
 // blockFromAddr returns a block given an address somewhere in the heap (which
 // might not be heap-aligned).
 func blockFromAddr(addr uintptr) uintptr {
-	if addr < memory.HeapStart || addr >= uintptr(memory.MetadataStart) {
-		printlnAndPanic("gc: trying to get block from invalid address")
+	if addr < heapStart || addr >= uintptr(metadataStart) {
+		println("gc: trying to get block from invalid address")
 	}
-	return (addr - memory.HeapStart) / bytesPerBlock
+	return (addr - heapStart) / bytesPerBlock
 }
 
 // Return a pointer to the start of the allocated object.
@@ -104,9 +163,9 @@ func gcPointerOf(blockAddr uintptr) unsafe.Pointer {
 
 // Return the address of the start of the allocated object.
 func gcAddressOf(blockAddr uintptr) uintptr {
-	addr := memory.HeapStart + blockAddr*bytesPerBlock
-	if addr > uintptr(memory.MetadataStart) {
-		printlnAndPanic("gc: block pointing inside metadata")
+	addr := heapStart + blockAddr*bytesPerBlock
+	if addr > uintptr(metadataStart) {
+		println("gc: block pointing inside metadata")
 	}
 	return addr
 }
@@ -137,7 +196,7 @@ func gcFindHead(blockAddr uintptr) uintptr {
 		blockAddr--
 	}
 	if gcStateOf(blockAddr) != blockStateHead && gcStateOf(blockAddr) != blockStateMark {
-		printlnAndPanic("gc: found tail without head")
+		println("gc: found tail without head")
 	}
 	return blockAddr
 }
@@ -148,14 +207,14 @@ func gcFindNext(blockAddr uintptr) uintptr {
 	if gcStateOf(blockAddr) == blockStateHead || gcStateOf(blockAddr) == blockStateMark {
 		blockAddr++
 	}
-	for gcAddressOf(blockAddr) < uintptr(memory.MetadataStart) && gcStateOf(blockAddr) == blockStateTail {
+	for gcAddressOf(blockAddr) < uintptr(metadataStart) && gcStateOf(blockAddr) == blockStateTail {
 		blockAddr++
 	}
 	return blockAddr
 }
 
 func gcStateByteOf(blockAddr uintptr) byte {
-	return *(*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+	return *(*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
 }
 
 // Return the block state given a state byte. The state byte must have been
@@ -173,19 +232,19 @@ func gcStateOf(blockAddr uintptr) uint8 {
 // bits than the current state. Allowed transitions: from free to any state and
 // from head to mark.
 func gcSetState(blockAddr uintptr, newState uint8) {
-	stateBytePtr := (*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+	stateBytePtr := (*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
 	*stateBytePtr |= uint8(newState << ((blockAddr % blocksPerStateByte) * stateBits))
 	if gcStateOf(blockAddr) != newState {
-		printlnAndPanic("gc: setState() was not successful")
+		println("gc: setState() was not successful")
 	}
 }
 
 // markFree sets the block state to free, no matter what state it was in before.
 func gcMarkFree(blockAddr uintptr) {
-	stateBytePtr := (*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+	stateBytePtr := (*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
 	*stateBytePtr &^= uint8(blockStateMask << ((blockAddr % blocksPerStateByte) * stateBits))
 	if gcStateOf(blockAddr) != blockStateFree {
-		printlnAndPanic("gc: markFree() was not successful")
+		println("gc: markFree() was not successful")
 	}
 	*(*[wordsPerBlock]uintptr)(unsafe.Pointer(gcAddressOf(blockAddr))) = [wordsPerBlock]uintptr{}
 }
@@ -194,25 +253,27 @@ func gcMarkFree(blockAddr uintptr) {
 // before calling this function.
 func gcUnmark(blockAddr uintptr) {
 	if gcStateOf(blockAddr) != blockStateMark {
-		printlnAndPanic("gc: unmark() on a block that is not marked")
+		println("gc: unmark() on a block that is not marked")
 	}
 	clearMask := blockStateMask ^ blockStateHead // the bits to clear from the state
-	stateBytePtr := (*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+	stateBytePtr := (*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
 	*stateBytePtr &^= uint8(clearMask << ((blockAddr % blocksPerStateByte) * stateBits))
 	if gcStateOf(blockAddr) != blockStateHead {
-		printlnAndPanic("gc: unmark() was not successful")
+		println("gc: unmark() was not successful")
 	}
 }
 
 func isOnHeap(ptr uintptr) bool {
-	return ptr >= memory.HeapStart && ptr < uintptr(memory.MetadataStart)
+	return ptr >= heapStart && ptr < uintptr(metadataStart)
 }
 
 // alloc tries to find some free space on the heap, possibly doing a garbage
 // collection cycle if needed. If no space is free, it panics.
 //
 //go:noinline
-func alloc(size uintptr) unsafe.Pointer {
+func Alloc(size uintptr) unsafe.Pointer {
+	lazyInit()
+
 	if size == 0 {
 		return unsafe.Pointer(&zeroSizedAlloc)
 	}
@@ -237,7 +298,7 @@ func alloc(size uintptr) unsafe.Pointer {
 				// free memory and try again.
 				heapScanCount = 2
 				freeBytes := GC()
-				heapSize := uintptr(memory.MetadataStart) - memory.HeapStart
+				heapSize := uintptr(metadataStart) - heapStart
 				if freeBytes < heapSize/3 {
 					// Ensure there is at least 33% headroom.
 					// This percentage was arbitrarily chosen, and may need to
@@ -254,13 +315,13 @@ func alloc(size uintptr) unsafe.Pointer {
 						// Unfortunately the heap could not be increased. This
 						// happens on baremetal systems for example (where all
 						// available RAM has already been dedicated to the heap).
-						printlnAndPanic("out of memory")
+						println("out of memory")
 					}
 				}
 			}
 		}
 
 		// Wrap around the end of the heap.
-		if index == memory.EndBlock {
+		if index == endBlock {
 			index = 0
 			// Reset numFreeBlocks as allocations cannot wrap.
 			numFreeBlocks = 0
@@ -296,14 +357,15 @@ func alloc(size uintptr) unsafe.Pointer {
 			}
 
 			// Return a pointer to this allocation.
-			return gcPointerOf(thisAlloc)
+			return memset(gcPointerOf(thisAlloc), 0, size)
 		}
 	}
 }
 
-func realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
+func Realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
+	lazyInit()
 	if ptr == nil {
-		return alloc(size)
+		return Alloc(size)
 	}
 
 	ptrAddress := uintptr(ptr)
@@ -316,8 +378,8 @@ func realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
 		return ptr
 	}
 
-	newAlloc := alloc(size)
-	c.Memcpy(newAlloc, ptr, oldSize)
+	newAlloc := Alloc(size)
+	memcpy(newAlloc, ptr, oldSize)
 	free(ptr)
 
 	return newAlloc
@@ -331,6 +393,8 @@ func free(ptr unsafe.Pointer) {
 // of the runtime.GC() function. The difference is that it returns the number of
 // free bytes in the heap after the GC is finished.
 func GC() (freeBytes uintptr) {
+	lazyInit()
+
 	if gcDebug {
 		println("running collection cycle...")
 	}
@@ -356,19 +420,9 @@ func GC() (freeBytes uintptr) {
 // well (recursively). The start and end parameters must be valid pointers and
 // must be aligned.
 func markRoots(start, end uintptr) {
-	if true {
-		if start >= end {
-			printlnAndPanic("gc: unexpected range to mark")
-		}
-		if start%unsafe.Alignof(start) != 0 {
-			printlnAndPanic("gc: unaligned start pointer")
-		}
-		if end%unsafe.Alignof(end) != 0 {
-			printlnAndPanic("gc: unaligned end pointer")
-		}
+	if start >= end {
+		println("gc: unexpected range to mark")
 	}
 
 	// Reduce the end bound to avoid reading too far on platforms where pointer alignment is smaller than pointer size.
 	// If the size of the range is 0, then end will be slightly below start after this.
 	end -= unsafe.Sizeof(end) - unsafe.Alignof(end)
@@ -419,11 +473,8 @@ func startMark(root uintptr) {
 		}
 
 		// Mark block.
 		gcSetState(referencedBlock, blockStateMark)
 
-		println("mark: %lx from %lx", gcPointerOf(referencedBlock), gcPointerOf(root))
-
 		if stackLen == len(stack) {
 			// The stack is full.
 			// It is necessary to rescan all marked blocks once we are done.
@@ -446,7 +497,7 @@ func finishMark() {
 	for markStackOverflow {
 		// Re-mark all blocks.
 		markStackOverflow = false
-		for block := uintptr(0); block < memory.EndBlock; block++ {
+		for block := uintptr(0); block < endBlock; block++ {
 			if gcStateOf(block) != blockStateMark {
 				// Block is not marked, so we do not need to rescan it.
 				continue
@@ -461,7 +512,6 @@ func finishMark() {
 // mark a GC root at the address addr.
 func markRoot(addr, root uintptr) {
 	if isOnHeap(root) {
-		println("on the heap: %lx", gcPointerOf(root))
 		block := blockFromAddr(root)
 		if gcStateOf(block) == blockStateFree {
 			// The to-be-marked object doesn't actually exist.
@@ -477,14 +527,13 @@ func markRoot(addr, root uintptr) {
 		}
 	}
 }
 
-// Sweep goes through all memory and frees unmarked memory.
+// Sweep goes through all memory and frees unmarked
 // It returns how many bytes are free in the heap after the sweep.
 func sweep() (freeBytes uintptr) {
 	freeCurrentObject := false
 	var freed uint64
 
-	var from uintptr
-	for block := uintptr(0); block < memory.EndBlock; block++ {
+	for block := uintptr(0); block < endBlock; block++ {
 		switch gcStateOf(block) {
 		case blockStateHead:
 			// Unmarked head. Free it, including all tail blocks following it.
@@ -492,7 +541,6 @@ func sweep() (freeBytes uintptr) {
 			freeCurrentObject = true
 			gcFrees++
 			freed++
-			from = block
 		case blockStateTail:
 			if freeCurrentObject {
 				// This is a tail object following an unmarked head.
@@ -500,7 +548,6 @@ func sweep() (freeBytes uintptr) {
 				gcMarkFree(block)
 				freed++
 			}
-			println("free from %lx to %lx", gcPointerOf(from), gcPointerOf(block))
 		case blockStateMark:
 			// This is a marked object. The next tail blocks must not be freed,
 			// but the mark bit must be removed so the next GC cycle will
@@ -524,12 +571,9 @@ func growHeap() bool {
 }
 
 func gcMarkReachable() {
-	// a compiler trick to get current SP
-	println("scan stack", unsafe.Pointer(getsp()), unsafe.Pointer(memory.StackTop))
-	markRoots(uintptr(getsp()), memory.StackTop)
-	println("scan global", unsafe.Pointer(memory.GlobalsStart), unsafe.Pointer(memory.GlobalsEnd))
-
-	markRoots(memory.GlobalsStart, memory.GlobalsEnd)
+	println("scan stack", getsp(), unsafe.Pointer(stackTop))
+	markRoots(uintptr(getsp()), stackTop)
+	markRoots(globalsStart, globalsEnd)
 }
 
 func gcResumeWorld() {
@@ -1,71 +0,0 @@
-//go:build baremetal
-
-package memory
-
-import "unsafe"
-
-// no init function, we don't want to init this twice
-const LLGoPackage = "noinit"
-
-//go:linkname _heapStart _heapStart
-var _heapStart [0]byte
-
-//go:linkname _heapEnd _heapEnd
-var _heapEnd [0]byte
-
-//go:linkname _stackStart _stack_top
-var _stackStart [0]byte
-
-//go:linkname _globals_start _globals_start
-var _globals_start [0]byte
-
-//go:linkname _globals_end _globals_end
-var _globals_end [0]byte
-
-// since we don't have an init() function, these should be initalized by initHeap(), which is called by <main> entry
-var (
-	HeapStart     uintptr        // start address of heap area
-	HeapEnd       uintptr        // end address of heap area
-	GlobalsStart  uintptr        // start address of global variable area
-	GlobalsEnd    uintptr        // end address of global variable area
-	StackTop      uintptr        // the top of stack
-	EndBlock      uintptr        // GC end block index
-	MetadataStart unsafe.Pointer // start address of GC metadata
-)
-
-// Some globals + constants for the entire GC.
-
-const (
-	wordsPerBlock      = 4 // number of pointers in an allocated block
-	bytesPerBlock      = wordsPerBlock * unsafe.Sizeof(HeapStart)
-	stateBits          = 2 // how many bits a block state takes (see blockState type)
-	blocksPerStateByte = 8 / stateBits
-	markStackSize      = 8 * unsafe.Sizeof((*int)(nil)) // number of to-be-marked blocks to queue before forcing a rescan
-)
-
-// zeroSizedAlloc is just a sentinel that gets returned when allocating 0 bytes.
-var zeroSizedAlloc uint8
-
-// when executing initGC(), we must ensure there's no any allocations.
-// use linking here to avoid import clite
-//
-//go:linkname memset C.memset
-func memset(unsafe.Pointer, int, uintptr)
-
-// this function MUST be initalized first, which means it's required to be initalized before runtime
-//
-//export initGC
-func initGC() {
-	// reserve 2K blocks for malloc
-	HeapStart = uintptr(unsafe.Pointer(&_heapStart)) + 2048
-	HeapEnd = uintptr(unsafe.Pointer(&_heapEnd))
-	GlobalsStart = uintptr(unsafe.Pointer(&_globals_start))
-	GlobalsEnd = uintptr(unsafe.Pointer(&_globals_end))
-	totalSize := HeapEnd - HeapStart
-	metadataSize := (totalSize + blocksPerStateByte*bytesPerBlock) / (1 + blocksPerStateByte*bytesPerBlock)
-	MetadataStart = unsafe.Pointer(HeapEnd - metadataSize)
-	EndBlock = (uintptr(MetadataStart) - HeapStart) / bytesPerBlock
-	StackTop = uintptr(unsafe.Pointer(&_stackStart))
-
-	memset(MetadataStart, 0, metadataSize)
-}
@@ -23,6 +23,7 @@ import (
 
 	c "github.com/goplus/llgo/runtime/internal/clite"
 	"github.com/goplus/llgo/runtime/internal/clite/bdwgc"
+	_ "github.com/goplus/llgo/runtime/internal/runtime/bdwgc"
 )
 
 // AllocU allocates uninitialized memory.
@@ -21,16 +21,15 @@ package runtime
 import (
 	"unsafe"
 
-	c "github.com/goplus/llgo/runtime/internal/clite"
+	"github.com/goplus/llgo/runtime/internal/runtime/tinygogc"
 )
 
 // AllocU allocates uninitialized memory.
 func AllocU(size uintptr) unsafe.Pointer {
-	return alloc(size)
+	return tinygogc.Alloc(size)
 }
 
 // AllocZ allocates zero-initialized memory.
 func AllocZ(size uintptr) unsafe.Pointer {
-	ptr := alloc(size)
-	return c.Memset(ptr, 0, size)
+	return tinygogc.Alloc(size)
 }
@@ -92,6 +92,12 @@ SECTIONS
     _iram_end = .;
   } > iram_seg
 
+  .stack (NOLOAD) :
+  {
+    . += 16K;
+    __stack = .;
+  } > dram_seg
+
   /**
    * This section is required to skip .iram0.text area because iram0_0_seg and
    * dram0_0_seg reflect the same address space on different buses.
@@ -102,14 +108,6 @@ SECTIONS
     . += ORIGIN(iram_seg) == ORIGIN(dram_seg) ? 0 : _iram_end - _iram_start;
   } > dram_seg
 
-  .stack (NOLOAD) :
-  {
-    __stack_end = .;
-    . = ALIGN(16);
-    . += 16K;
-    __stack = .;
-  }
-
   .data :
   {
     _data_start = .;
|
|||||||
@@ -27,7 +27,6 @@ SECTIONS
|
|||||||
|
|
||||||
.stack (NOLOAD) :
|
.stack (NOLOAD) :
|
||||||
{
|
{
|
||||||
__stack_end = .;
|
|
||||||
. = ALIGN(16);
|
. = ALIGN(16);
|
||||||
. += 16K;
|
. += 16K;
|
||||||
__stack = .;
|
__stack = .;
|
||||||
@@ -176,3 +175,108 @@ _globals_start = _data_start;
 _globals_end = _end;
 _heapStart = _end;
 _stack_top = __stack;
+
+/* From ESP-IDF:
+ * components/esp_rom/esp32/ld/esp32.rom.newlib-funcs.ld
+ * This is the subset that is sometimes used by LLVM during codegen, and thus
+ * must always be present.
+ */
+memcpy = 0x4000c2c8;
+memmove = 0x4000c3c0;
+memset = 0x4000c44c;
+
+/* From ESP-IDF:
+ * components/esp_rom/esp32/ld/esp32.rom.libgcc.ld
+ * These are called from LLVM during codegen. The original license is Apache
+ * 2.0, but I believe that a list of function names and addresses can't really
+ * be copyrighted.
+ */
+__absvdi2 = 0x4006387c;
+__absvsi2 = 0x40063868;
+__adddf3 = 0x40002590;
+__addsf3 = 0x400020e8;
+__addvdi3 = 0x40002cbc;
+__addvsi3 = 0x40002c98;
+__ashldi3 = 0x4000c818;
+__ashrdi3 = 0x4000c830;
+__bswapdi2 = 0x40064b08;
+__bswapsi2 = 0x40064ae0;
+__clrsbdi2 = 0x40064b7c;
+__clrsbsi2 = 0x40064b64;
+__clzdi2 = 0x4000ca50;
+__clzsi2 = 0x4000c7e8;
+__cmpdi2 = 0x40063820;
+__ctzdi2 = 0x4000ca64;
+__ctzsi2 = 0x4000c7f0;
+__divdc3 = 0x400645a4;
+__divdf3 = 0x40002954;
+__divdi3 = 0x4000ca84;
+__divsi3 = 0x4000c7b8;
+__eqdf2 = 0x400636a8;
+__eqsf2 = 0x40063374;
+__extendsfdf2 = 0x40002c34;
+__ffsdi2 = 0x4000ca2c;
+__ffssi2 = 0x4000c804;
+__fixdfdi = 0x40002ac4;
+__fixdfsi = 0x40002a78;
+__fixsfdi = 0x4000244c;
+__fixsfsi = 0x4000240c;
+__fixunsdfsi = 0x40002b30;
+__fixunssfdi = 0x40002504;
+__fixunssfsi = 0x400024ac;
+__floatdidf = 0x4000c988;
+__floatdisf = 0x4000c8c0;
+__floatsidf = 0x4000c944;
+__floatsisf = 0x4000c870;
+__floatundidf = 0x4000c978;
+__floatundisf = 0x4000c8b0;
+__floatunsidf = 0x4000c938;
+__floatunsisf = 0x4000c864;
+__gcc_bcmp = 0x40064a70;
+__gedf2 = 0x40063768;
+__gesf2 = 0x4006340c;
+__gtdf2 = 0x400636dc;
+__gtsf2 = 0x400633a0;
+__ledf2 = 0x40063704;
+__lesf2 = 0x400633c0;
+__lshrdi3 = 0x4000c84c;
+__ltdf2 = 0x40063790;
+__ltsf2 = 0x4006342c;
+__moddi3 = 0x4000cd4c;
+__modsi3 = 0x4000c7c0;
+__muldc3 = 0x40063c90;
+__muldf3 = 0x4006358c;
+__muldi3 = 0x4000c9fc;
+__mulsf3 = 0x400632c8;
+__mulsi3 = 0x4000c7b0;
+__mulvdi3 = 0x40002d78;
+__mulvsi3 = 0x40002d60;
+__nedf2 = 0x400636a8;
+__negdf2 = 0x400634a0;
+__negdi2 = 0x4000ca14;
+__negsf2 = 0x400020c0;
+__negvdi2 = 0x40002e98;
+__negvsi2 = 0x40002e78;
+__nesf2 = 0x40063374;
+__nsau_data = 0x3ff96544;
+__paritysi2 = 0x40002f3c;
+__popcount_tab = 0x3ff96544;
+__popcountdi2 = 0x40002ef8;
+__popcountsi2 = 0x40002ed0;
+__powidf2 = 0x400638e4;
+__subdf3 = 0x400026e4;
+__subsf3 = 0x400021d0;
+__subvdi3 = 0x40002d20;
+__subvsi3 = 0x40002cf8;
+__truncdfsf2 = 0x40002b90;
+__ucmpdi2 = 0x40063840;
+__udiv_w_sdiv = 0x40064bec;
+__udivdi3 = 0x4000cff8;
+__udivmoddi4 = 0x40064bf4;
+__udivsi3 = 0x4000c7c8;
+__umoddi3 = 0x4000d280;
+__umodsi3 = 0x4000c7d0;
+__umulsidi3 = 0x4000c7d8;
+__unorddf2 = 0x400637f4;
+__unordsf2 = 0x40063478;