diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 2e706690..e09403fe 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -52,6 +52,11 @@ jobs: if: ${{!startsWith(matrix.os, 'macos')}} run: go test ./... + - name: Test Baremetal GC + if: ${{!startsWith(matrix.os, 'macos')}} + working-directory: runtime/internal/runtime/tinygogc + run: go test -tags testGC . + - name: Test with coverage if: startsWith(matrix.os, 'macos') run: go test -coverprofile="coverage.txt" -covermode=atomic ./... diff --git a/runtime/internal/clite/stdio_baremetal.go b/runtime/internal/clite/stdio_baremetal.go index 49b8cdc1..f318c065 100644 --- a/runtime/internal/clite/stdio_baremetal.go +++ b/runtime/internal/clite/stdio_baremetal.go @@ -29,7 +29,12 @@ var Stderr FilePtr = Stdout //go:linkname setvbuf C.setvbuf func setvbuf(fp FilePtr, buf *Char, typ Int, size SizeT) +const ( + _IONBF = 2 // No buffering - immediate output +) + func init() { - setvbuf(Stdout, nil, 2, 0) - setvbuf(Stdin, nil, 2, 0) + // Disable buffering for baremetal targets to ensure immediate output + setvbuf(Stdout, nil, _IONBF, 0) + setvbuf(Stdin, nil, _IONBF, 0) } diff --git a/runtime/internal/clite/stdio_darwin.go b/runtime/internal/clite/stdio_darwin.go index 324403f5..cd30c073 100644 --- a/runtime/internal/clite/stdio_darwin.go +++ b/runtime/internal/clite/stdio_darwin.go @@ -1,5 +1,4 @@ -//go:build darwin -// +build darwin +//go:build darwin && !baremetal /* * Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved. 
diff --git a/runtime/internal/runtime/tinygogc/gc.go b/runtime/internal/runtime/tinygogc/gc.go index 0f91b82a..bfe4149e 100644 --- a/runtime/internal/runtime/tinygogc/gc.go +++ b/runtime/internal/runtime/tinygogc/gc.go @@ -1,3 +1,22 @@ +//go:build baremetal && !testGC + package tinygogc +import "unsafe" + const LLGoPackage = "link: --wrap=malloc --wrap=realloc --wrap=calloc" + +//export __wrap_malloc +func __wrap_malloc(size uintptr) unsafe.Pointer { + return Alloc(size) +} + +//export __wrap_calloc +func __wrap_calloc(nmemb, size uintptr) unsafe.Pointer { + return Alloc(nmemb * size) +} + +//export __wrap_realloc +func __wrap_realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer { + return Realloc(ptr, size) +} diff --git a/runtime/internal/runtime/tinygogc/gc_llgo.go b/runtime/internal/runtime/tinygogc/gc_llgo.go new file mode 100644 index 00000000..5f2d73e8 --- /dev/null +++ b/runtime/internal/runtime/tinygogc/gc_llgo.go @@ -0,0 +1,35 @@ +//go:build !testGC + +package tinygogc + +import ( + "unsafe" + _ "unsafe" +) + +//go:linkname getsp llgo.stackSave +func getsp() unsafe.Pointer + +// when executing initGC(), we must ensure there are no allocations. 
+// use linking here to avoid import clite +// +//go:linkname memset C.memset +func memset(unsafe.Pointer, int, uintptr) unsafe.Pointer + +//go:linkname memcpy C.memcpy +func memcpy(unsafe.Pointer, unsafe.Pointer, uintptr) + +//go:linkname _heapStart _heapStart +var _heapStart [0]byte + +//go:linkname _heapEnd _heapEnd +var _heapEnd [0]byte + +//go:linkname _stackStart _stack_top +var _stackStart [0]byte + +//go:linkname _globals_start _globals_start +var _globals_start [0]byte + +//go:linkname _globals_end _globals_end +var _globals_end [0]byte diff --git a/runtime/internal/runtime/tinygogc/gc_test.go b/runtime/internal/runtime/tinygogc/gc_test.go new file mode 100644 index 00000000..0234b5fa --- /dev/null +++ b/runtime/internal/runtime/tinygogc/gc_test.go @@ -0,0 +1,35 @@ +//go:build testGC + +package tinygogc + +import ( + "unsafe" + _ "unsafe" +) + +var currentStack uintptr + +func getsp() uintptr { + return currentStack +} + +var _heapStart [0]byte + +var _heapEnd [0]byte + +var _stackStart [0]byte + +var _globals_start [0]byte + +var _globals_end [0]byte + +//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers +func memclrNoHeapPointers(unsafe.Pointer, uintptr) unsafe.Pointer + +//go:linkname memcpy runtime.memmove +func memcpy(to unsafe.Pointer, from unsafe.Pointer, size uintptr) + +func memset(ptr unsafe.Pointer, n int, size uintptr) unsafe.Pointer { + memclrNoHeapPointers(ptr, size) + return ptr +} diff --git a/runtime/internal/runtime/tinygogc/gc_tinygo.go b/runtime/internal/runtime/tinygogc/gc_tinygo.go index 2efcda91..1081dafb 100644 --- a/runtime/internal/runtime/tinygogc/gc_tinygo.go +++ b/runtime/internal/runtime/tinygogc/gc_tinygo.go @@ -1,4 +1,4 @@ -//go:build baremetal +//go:build baremetal || testGC /* * Copyright (c) 2018-2025 The TinyGo Authors. All rights reserved. 
@@ -20,17 +20,12 @@ package tinygogc import ( "unsafe" - _ "unsafe" ) const gcDebug = false -const needsStaticHeap = true - -// Provide some abc.Straction over heap blocks. // blockState stores the four states in which a block can be. It is two bits in // size. - const ( blockStateFree uint8 = 0 // 00 blockStateHead uint8 = 1 // 01 @@ -46,33 +41,6 @@ const blockStateByteAllTails = 0 | uint8(blockStateTail<<(stateBits*1)) | uint8(blockStateTail<<(stateBits*0)) -//go:linkname getsp llgo.stackSave -func getsp() unsafe.Pointer - -// when executing initGC(), we must ensure there's no any allocations. -// use linking here to avoid import clite -// -//go:linkname memset C.memset -func memset(unsafe.Pointer, int, uintptr) unsafe.Pointer - -//go:linkname memcpy C.memcpy -func memcpy(unsafe.Pointer, unsafe.Pointer, uintptr) - -//go:linkname _heapStart _heapStart -var _heapStart [0]byte - -//go:linkname _heapEnd _heapEnd -var _heapEnd [0]byte - -//go:linkname _stackStart _stack_top -var _stackStart [0]byte - -//go:linkname _globals_start _globals_start -var _globals_start [0]byte - -//go:linkname _globals_end _globals_end -var _globals_end [0]byte - // since we don't have an init() function, these should be initalized by initHeap(), which is called by
entry var ( heapStart uintptr // start address of heap area @@ -109,24 +77,9 @@ const ( markStackSize = 8 * unsafe.Sizeof((*int)(nil)) // number of to-be-marked blocks to queue before forcing a rescan ) -//export __wrap_malloc -func __wrap_malloc(size uintptr) unsafe.Pointer { - return Alloc(size) -} - -//export __wrap_calloc -func __wrap_calloc(size uintptr) unsafe.Pointer { - return Alloc(size) -} - -//export __wrap_realloc -func __wrap_realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer { - return Realloc(ptr, size) -} - // this function MUST be initalized first, which means it's required to be initalized before runtime func initGC() { - // reserve 2K blocks for libc internal malloc, we cannot wrap that function + // reserve 2K blocks for libc internal malloc, we cannot wrap those internal functions heapStart = uintptr(unsafe.Pointer(&_heapStart)) + 2048 heapEnd = uintptr(unsafe.Pointer(&_heapEnd)) globalsStart = uintptr(unsafe.Pointer(&_globals_start)) @@ -571,7 +524,6 @@ func growHeap() bool { } func gcMarkReachable() { - println("scan stack", getsp(), unsafe.Pointer(stackTop)) markRoots(uintptr(getsp()), stackTop) markRoots(globalsStart, globalsEnd) } diff --git a/runtime/internal/runtime/tinygogc/pc_mock_test.go b/runtime/internal/runtime/tinygogc/pc_mock_test.go new file mode 100644 index 00000000..9c37b136 --- /dev/null +++ b/runtime/internal/runtime/tinygogc/pc_mock_test.go @@ -0,0 +1,377 @@ +//go:build testGC + +package tinygogc + +import ( + "testing" + "unsafe" +) + +const ( + // Mock a typical embedded system with 128KB RAM + mockHeapSize = 128 * 1024 // 128KB + mockGlobalsSize = 4 * 1024 // 4KB for globals + mockStackSize = 8 * 1024 // 8KB for stack + mockReservedSize = 2048 // 2KB reserved as in real implementation +) + +type testObject struct { + data [4]uintptr +} + +// mockMemoryLayout simulates the memory layout of an embedded system +type mockMemoryLayout struct { + memory []byte + heapStart uintptr + heapEnd uintptr + globalsStart uintptr 
+ globalsEnd uintptr + stackStart uintptr + stackEnd uintptr +} + +// createMockMemoryLayout creates a simulated 128KB memory environment +func createMockMemoryLayout() *mockMemoryLayout { + totalMemory := mockHeapSize + mockGlobalsSize + mockStackSize + memory := make([]byte, totalMemory) + baseAddr := uintptr(unsafe.Pointer(&memory[0])) + + layout := &mockMemoryLayout{ + memory: memory, + globalsStart: baseAddr, + globalsEnd: baseAddr + mockGlobalsSize, + heapStart: baseAddr + mockGlobalsSize + mockReservedSize, + heapEnd: baseAddr + mockGlobalsSize + mockHeapSize, + stackStart: baseAddr + mockGlobalsSize + mockHeapSize, + stackEnd: baseAddr + uintptr(totalMemory), + } + + return layout +} + +// setupMockGC initializes the GC with mock memory layout +func (m *mockMemoryLayout) setupMockGC() { + // Set mock values + heapStart = m.heapStart + heapEnd = m.heapEnd + globalsStart = m.globalsStart + globalsEnd = m.globalsEnd + stackTop = m.stackEnd + + // Set currentStack to the start of the mock stack + currentStack = m.stackStart + + // Calculate metadata layout + totalSize := heapEnd - heapStart + metadataSize := (totalSize + blocksPerStateByte*bytesPerBlock) / (1 + blocksPerStateByte*bytesPerBlock) + metadataStart = unsafe.Pointer(heapEnd - metadataSize) + endBlock = (uintptr(metadataStart) - heapStart) / bytesPerBlock + + // Clear metadata + metadataBytes := (*[1024]byte)(metadataStart)[:metadataSize:metadataSize] + for i := range metadataBytes { + metadataBytes[i] = 0 + } + + // Reset allocator state + nextAlloc = 0 + isGCInit = true +} + +// createTestObjects creates a network of objects for testing reachability +func createTestObjects(layout *mockMemoryLayout) []*testObject { + // Allocate several test objects + objects := make([]*testObject, 0, 10) + + // Dependencies Graph + // root1 -> child1 -> grandchild1 -> child2 + // root1 -> child2 -> grandchild1 + + // Create root objects (reachable from stack/globals) + root1 := 
(*testObject)(Alloc(unsafe.Sizeof(testObject{}))) + root2 := (*testObject)(Alloc(unsafe.Sizeof(testObject{}))) + objects = append(objects, root1, root2) + + // Create objects reachable from root1 + child1 := (*testObject)(Alloc(unsafe.Sizeof(testObject{}))) + child2 := (*testObject)(Alloc(unsafe.Sizeof(testObject{}))) + root1.data[0] = uintptr(unsafe.Pointer(child1)) + root1.data[1] = uintptr(unsafe.Pointer(child2)) + objects = append(objects, child1, child2) + + // Create objects reachable from child1 + grandchild1 := (*testObject)(Alloc(unsafe.Sizeof(testObject{}))) + child1.data[0] = uintptr(unsafe.Pointer(grandchild1)) + objects = append(objects, grandchild1) + + // Create circular reference between child2 and grandchild1 + child2.data[0] = uintptr(unsafe.Pointer(grandchild1)) + grandchild1.data[0] = uintptr(unsafe.Pointer(child2)) + + // Create unreachable objects (garbage) + garbage1 := (*testObject)(Alloc(unsafe.Sizeof(testObject{}))) + garbage2 := (*testObject)(Alloc(unsafe.Sizeof(testObject{}))) + // Create circular reference in garbage + garbage1.data[0] = uintptr(unsafe.Pointer(garbage2)) + garbage2.data[0] = uintptr(unsafe.Pointer(garbage1)) + objects = append(objects, garbage1, garbage2) + + return objects +} + +// mockStackScan simulates scanning stack for root pointers +func mockStackScan(roots []*testObject) { + // Simulate stack by creating local variables pointing to roots + + for _, root := range roots[:2] { // Only first 2 are actually roots + addr := uintptr(unsafe.Pointer(&root)) + ptr := uintptr(unsafe.Pointer(root)) + markRoot(addr, ptr) + } +} + +func TestMockGCBasicAllocation(t *testing.T) { + layout := createMockMemoryLayout() + layout.setupMockGC() + + // Test basic allocation + ptr1 := Alloc(32) + if ptr1 == nil { + t.Fatal("Failed to allocate 32 bytes") + } + + ptr2 := Alloc(64) + if ptr2 == nil { + t.Fatal("Failed to allocate 64 bytes") + } + + // Verify pointers are within heap bounds + addr1 := uintptr(ptr1) + addr2 := uintptr(ptr2) 
+ + if addr1 < heapStart || addr1 >= uintptr(metadataStart) { + t.Errorf("ptr1 %x not within heap bounds [%x, %x)", addr1, heapStart, uintptr(metadataStart)) + } + + if addr2 < heapStart || addr2 >= uintptr(metadataStart) { + t.Errorf("ptr2 %x not within heap bounds [%x, %x)", addr2, heapStart, uintptr(metadataStart)) + } + + t.Logf("Allocated ptr1 at %x, ptr2 at %x", addr1, addr2) + t.Logf("Heap bounds: [%x, %x)", heapStart, uintptr(metadataStart)) +} + +func TestMockGCReachabilityAndSweep(t *testing.T) { + layout := createMockMemoryLayout() + layout.setupMockGC() + + // Track initial stats + initialMallocs := gcMallocs + initialFrees := gcFrees + + // Create test object network + objects := createTestObjects(layout) + roots := objects[:2] // First 2 are roots + + t.Logf("Created %d objects, %d are roots", len(objects), len(roots)) + t.Logf("Mallocs: %d", gcMallocs-initialMallocs) + + // Verify all objects are initially allocated + for i, obj := range objects { + addr := uintptr(unsafe.Pointer(obj)) + block := blockFromAddr(addr) + state := gcStateOf(block) + if state != blockStateHead { + t.Errorf("Object %d at %x has state %d, expected %d (HEAD)", i, addr, state, blockStateHead) + } + } + + // Perform GC with manual root scanning + // Mark reachable objects first + mockStackScan(roots) + finishMark() + + // Then sweep unreachable objects + freedBytes := sweep() + t.Logf("Freed %d bytes during GC", freedBytes) + t.Logf("Frees: %d (delta: %d)", gcFrees, gcFrees-initialFrees) + + // Verify reachable objects are still allocated + reachableObjects := []unsafe.Pointer{ + unsafe.Pointer(objects[0]), // root1 + unsafe.Pointer(objects[1]), // root2 + unsafe.Pointer(objects[2]), // child1 (reachable from root1) + unsafe.Pointer(objects[3]), // child2 (reachable from root1) + unsafe.Pointer(objects[4]), // grandchild1 (reachable from child1, child2) + } + + for i, obj := range reachableObjects { + addr := uintptr(obj) + block := blockFromAddr(addr) + state := 
gcStateOf(block) + if state != blockStateHead { + t.Errorf("Reachable object %d at %x has state %d, expected %d (HEAD)", i, addr, state, blockStateHead) + } + } + + // Verify unreachable objects are freed + unreachableObjects := []unsafe.Pointer{ + unsafe.Pointer(objects[5]), // garbage1 + unsafe.Pointer(objects[6]), // garbage2 + } + + for i, obj := range unreachableObjects { + addr := uintptr(obj) + block := blockFromAddr(addr) + state := gcStateOf(block) + if state != blockStateFree { + t.Errorf("Unreachable object %d at %x has state %d, expected %d (FREE)", i, addr, state, blockStateFree) + } + } + + // Verify some memory was actually freed + if freedBytes == 0 { + t.Error("Expected some memory to be freed, but freed 0 bytes") + } + + if gcFrees == initialFrees { + t.Error("Expected some objects to be freed, but free count didn't change") + } + + // clear ref for grandchild + objects[2].data[0] = 0 + objects[3].data[0] = 0 + + // Perform GC with manual root scanning + // Mark reachable objects first + mockStackScan(roots) + finishMark() + + // Then sweep unreachable objects + freedBytes = sweep() + + blockAddr := blockFromAddr(uintptr(unsafe.Pointer(objects[3]))) + + state := gcStateOf(blockAddr) + if state != blockStateHead { + t.Errorf("Reachable object %d at %x has state %d, expected %d (HEAD)", 3, blockAddr, state, blockStateHead) + } + + blockAddr = blockFromAddr(uintptr(unsafe.Pointer(objects[4]))) + + state = gcStateOf(blockAddr) + if state != blockStateFree { + t.Errorf("Unreachable object %d at %x has state %d, expected %d (FREE)", 4, blockAddr, state, blockStateFree) + } +} + +func TestMockGCMemoryPressure(t *testing.T) { + layout := createMockMemoryLayout() + layout.setupMockGC() + + // Calculate available heap space + heapSize := uintptr(metadataStart) - heapStart + blockSize := bytesPerBlock + maxBlocks := heapSize / blockSize + + t.Logf("Heap size: %d bytes, Block size: %d bytes, Max blocks: %d", + heapSize, blockSize, maxBlocks) + + // Allocate 
until we trigger GC + var allocations []unsafe.Pointer + allocSize := uintptr(32) // Small allocations + + // Allocate about 80% of heap to trigger GC pressure + targetAllocations := int(maxBlocks * 4 / 5) // 80% capacity + + for i := 0; i < targetAllocations; i++ { + ptr := Alloc(allocSize) + if ptr == nil { + t.Fatalf("Failed to allocate at iteration %d", i) + } + allocations = append(allocations, ptr) + } + + initialMallocs := gcMallocs + t.Logf("Allocated %d objects (%d mallocs total)", len(allocations), initialMallocs) + + // Clear references to half the allocations (make them garbage) + garbageCount := len(allocations) / 2 + allocations = allocations[garbageCount:] + + // Force GC + freeBytes := GC() + + t.Logf("GC freed %d bytes", freeBytes) + t.Logf("Objects freed: %d", gcFrees) + + // Try to allocate more after GC + for i := 0; i < 10; i++ { + ptr := Alloc(allocSize) + if ptr == nil { + t.Fatalf("Failed to allocate after GC at iteration %d", i) + } + } + + t.Log("Successfully allocated more objects after GC") +} + +func TestMockGCCircularReferences(t *testing.T) { + layout := createMockMemoryLayout() + layout.setupMockGC() + + type Node struct { + data [3]uintptr + next uintptr + } + + // Create a circular linked list + nodes := make([]*Node, 5) + for i := range nodes { + nodes[i] = (*Node)(Alloc(unsafe.Sizeof(Node{}))) + nodes[i].data[0] = uintptr(i) // Store index as data + } + + // Link them in a circle + for i := range nodes { + nextIdx := (i + 1) % len(nodes) + nodes[i].next = uintptr(unsafe.Pointer(nodes[nextIdx])) + } + + t.Logf("Created circular list of %d nodes", len(nodes)) + + // Initially all should be allocated + for i, node := range nodes { + addr := uintptr(unsafe.Pointer(node)) + block := blockFromAddr(addr) + state := gcStateOf(block) + if state != blockStateHead { + t.Errorf("Node %d at %x has state %d, expected %d", i, addr, state, blockStateHead) + } + } + + // Clear references (make entire circle unreachable) + // for i := range nodes 
{ + // nodes[zi] = nil + // } + + // Force GC without roots + freeBytes := GC() + + t.Logf("GC freed %d bytes", freeBytes) + + // All nodes should now be freed since they're not reachable + // Note: We can't check the specific nodes since we cleared the references, + // but we can verify that significant memory was freed + expectedFreed := uintptr(len(nodes)) * ((unsafe.Sizeof(Node{}) + bytesPerBlock - 1) / bytesPerBlock) * bytesPerBlock + + if freeBytes < expectedFreed { + t.Errorf("Expected at least %d bytes freed, got %d", expectedFreed, freeBytes) + } + + // Verify we can allocate new objects in the freed space + newPtr := Alloc(unsafe.Sizeof(Node{})) + if newPtr == nil { + t.Error("Failed to allocate after freeing circular references") + } +} diff --git a/runtime/internal/runtime/z_gc.go b/runtime/internal/runtime/z_gc.go index 5669667b..8bb820be 100644 --- a/runtime/internal/runtime/z_gc.go +++ b/runtime/internal/runtime/z_gc.go @@ -23,7 +23,6 @@ import ( c "github.com/goplus/llgo/runtime/internal/clite" "github.com/goplus/llgo/runtime/internal/clite/bdwgc" - _ "github.com/goplus/llgo/runtime/internal/runtime/bdwgc" ) // AllocU allocates uninitialized memory. diff --git a/targets/esp32.app.elf.ld b/targets/esp32.app.elf.ld index e3c27fdc..94b90611 100755 --- a/targets/esp32.app.elf.ld +++ b/targets/esp32.app.elf.ld @@ -175,108 +175,3 @@ _globals_start = _data_start; _globals_end = _end; _heapStart = _end; _stack_top = __stack; - - -/* From ESP-IDF: - * components/esp_rom/esp32/ld/esp32.rom.newlib-funcs.ld - * This is the subset that is sometimes used by LLVM during codegen, and thus - * must always be present. - */ -memcpy = 0x4000c2c8; -memmove = 0x4000c3c0; -memset = 0x4000c44c; - -/* From ESP-IDF: - * components/esp_rom/esp32/ld/esp32.rom.libgcc.ld - * These are called from LLVM during codegen. The original license is Apache - * 2.0, but I believe that a list of function names and addresses can't really - * be copyrighted. 
- */ -__absvdi2 = 0x4006387c; -__absvsi2 = 0x40063868; -__adddf3 = 0x40002590; -__addsf3 = 0x400020e8; -__addvdi3 = 0x40002cbc; -__addvsi3 = 0x40002c98; -__ashldi3 = 0x4000c818; -__ashrdi3 = 0x4000c830; -__bswapdi2 = 0x40064b08; -__bswapsi2 = 0x40064ae0; -__clrsbdi2 = 0x40064b7c; -__clrsbsi2 = 0x40064b64; -__clzdi2 = 0x4000ca50; -__clzsi2 = 0x4000c7e8; -__cmpdi2 = 0x40063820; -__ctzdi2 = 0x4000ca64; -__ctzsi2 = 0x4000c7f0; -__divdc3 = 0x400645a4; -__divdf3 = 0x40002954; -__divdi3 = 0x4000ca84; -__divsi3 = 0x4000c7b8; -__eqdf2 = 0x400636a8; -__eqsf2 = 0x40063374; -__extendsfdf2 = 0x40002c34; -__ffsdi2 = 0x4000ca2c; -__ffssi2 = 0x4000c804; -__fixdfdi = 0x40002ac4; -__fixdfsi = 0x40002a78; -__fixsfdi = 0x4000244c; -__fixsfsi = 0x4000240c; -__fixunsdfsi = 0x40002b30; -__fixunssfdi = 0x40002504; -__fixunssfsi = 0x400024ac; -__floatdidf = 0x4000c988; -__floatdisf = 0x4000c8c0; -__floatsidf = 0x4000c944; -__floatsisf = 0x4000c870; -__floatundidf = 0x4000c978; -__floatundisf = 0x4000c8b0; -__floatunsidf = 0x4000c938; -__floatunsisf = 0x4000c864; -__gcc_bcmp = 0x40064a70; -__gedf2 = 0x40063768; -__gesf2 = 0x4006340c; -__gtdf2 = 0x400636dc; -__gtsf2 = 0x400633a0; -__ledf2 = 0x40063704; -__lesf2 = 0x400633c0; -__lshrdi3 = 0x4000c84c; -__ltdf2 = 0x40063790; -__ltsf2 = 0x4006342c; -__moddi3 = 0x4000cd4c; -__modsi3 = 0x4000c7c0; -__muldc3 = 0x40063c90; -__muldf3 = 0x4006358c; -__muldi3 = 0x4000c9fc; -__mulsf3 = 0x400632c8; -__mulsi3 = 0x4000c7b0; -__mulvdi3 = 0x40002d78; -__mulvsi3 = 0x40002d60; -__nedf2 = 0x400636a8; -__negdf2 = 0x400634a0; -__negdi2 = 0x4000ca14; -__negsf2 = 0x400020c0; -__negvdi2 = 0x40002e98; -__negvsi2 = 0x40002e78; -__nesf2 = 0x40063374; -__nsau_data = 0x3ff96544; -__paritysi2 = 0x40002f3c; -__popcount_tab = 0x3ff96544; -__popcountdi2 = 0x40002ef8; -__popcountsi2 = 0x40002ed0; -__powidf2 = 0x400638e4; -__subdf3 = 0x400026e4; -__subsf3 = 0x400021d0; -__subvdi3 = 0x40002d20; -__subvsi3 = 0x40002cf8; -__truncdfsf2 = 0x40002b90; -__ucmpdi2 = 0x40063840; 
-__udiv_w_sdiv = 0x40064bec; -__udivdi3 = 0x4000cff8; -__udivmoddi4 = 0x40064bf4; -__udivsi3 = 0x4000c7c8; -__umoddi3 = 0x4000d280; -__umodsi3 = 0x4000c7d0; -__umulsidi3 = 0x4000c7d8; -__unorddf2 = 0x400637f4; -__unordsf2 = 0x40063478;