diff --git a/internal/build/build.go b/internal/build/build.go
index b94e88e0..e0f18ea8 100644
--- a/internal/build/build.go
+++ b/internal/build/build.go
@@ -997,10 +997,6 @@ define weak void @runtime.init() {
   ret void
 }
 
-define weak void @initGC() {
-  ret void
-}
-
 ; TODO(lijie): workaround for syscall patch
 define weak void @"syscall.init"() {
   ret void
@@ -1012,7 +1008,6 @@ define weak void @"syscall.init"() {
 _llgo_0:
   store i32 %%0, ptr @__llgo_argc, align 4
   store ptr %%1, ptr @__llgo_argv, align 8
-  call void @initGC()
 %s
 %s
 %s
diff --git a/runtime/internal/clite/bdwgc/bdwgc.go b/runtime/internal/clite/bdwgc/bdwgc.go
index 83bf5f91..8f0af818 100644
--- a/runtime/internal/clite/bdwgc/bdwgc.go
+++ b/runtime/internal/clite/bdwgc/bdwgc.go
@@ -26,11 +26,6 @@ const (
 	LLGoPackage = "link: $(pkg-config --libs bdw-gc); -lgc"
 )
 
-//export initGC
-func initGC() {
-	Init()
-}
-
 // -----------------------------------------------------------------------------
 
 //go:linkname Init C.GC_init
diff --git a/runtime/internal/runtime/tinygogc/gc.go b/runtime/internal/runtime/tinygogc/gc.go
index 6c93d876..0f91b82a 100644
--- a/runtime/internal/runtime/tinygogc/gc.go
+++ b/runtime/internal/runtime/tinygogc/gc.go
@@ -1,9 +1,3 @@
 package tinygogc
 
-import "github.com/goplus/llgo/runtime/internal/runtime"
-
-const LLGoPackage = "noinit"
-
-func GC() {
-	runtime.GC()
-}
+const LLGoPackage = "link: --wrap=malloc --wrap=realloc --wrap=calloc"
diff --git a/runtime/internal/runtime/gc_tinygo.go b/runtime/internal/runtime/tinygogc/gc_tinygo.go
similarity index 76%
rename from runtime/internal/runtime/gc_tinygo.go
rename to runtime/internal/runtime/tinygogc/gc_tinygo.go
index 9962337c..2efcda91 100644
--- a/runtime/internal/runtime/gc_tinygo.go
+++ b/runtime/internal/runtime/tinygogc/gc_tinygo.go
@@ -16,29 +16,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package runtime
+package tinygogc
 
 import (
 	"unsafe"
 	_ "unsafe"
-
-	c "github.com/goplus/llgo/runtime/internal/clite"
-	"github.com/goplus/llgo/runtime/internal/runtime/tinygogc/memory"
 )
 
 const gcDebug = false
 const needsStaticHeap = true
 
-// Some globals + constants for the entire GC.
-
-const (
-	wordsPerBlock      = 4 // number of pointers in an allocated block
-	bytesPerBlock      = wordsPerBlock * unsafe.Sizeof(memory.HeapStart)
-	stateBits          = 2 // how many bits a block state takes (see blockState type)
-	blocksPerStateByte = 8 / stateBits
-	markStackSize      = 8 * unsafe.Sizeof((*int)(nil)) // number of to-be-marked blocks to queue before forcing a rescan
-)
-
 // Provide some abstraction over heap blocks.
 
 // blockState stores the four states in which a block can be. It is two bits in
 // size.
@@ -52,17 +39,52 @@ const (
 	blockStateMask uint8 = 3 // 11
 )
 
+// The byte value of a block where every block is a 'tail' block.
+const blockStateByteAllTails = 0 |
+	uint8(blockStateTail<<(stateBits*3)) |
+	uint8(blockStateTail<<(stateBits*2)) |
+	uint8(blockStateTail<<(stateBits*1)) |
+	uint8(blockStateTail<<(stateBits*0))
+
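Note: the constants above pack four 2-bit block states (free/head/tail/mark) into every metadata byte, which is what keeps the metadata overhead at one byte per four blocks. A standalone sketch of that encoding, runnable off-target (same constant values as the patch, simplified naming):

    package main

    import "fmt"

    const (
        blockStateFree uint8 = 0 // 00
        blockStateHead uint8 = 1 // 01
        blockStateTail uint8 = 2 // 10

        stateBits          = 2
        blocksPerStateByte = 8 / stateBits // 4 block states per metadata byte
    )

    // stateOf mirrors gcStateOf in the patch: pick the metadata byte,
    // shift the block's 2-bit field down, and mask it out.
    func stateOf(meta []uint8, i uintptr) uint8 {
        return (meta[i/blocksPerStateByte] >> ((i % blocksPerStateByte) * stateBits)) & 3
    }

    func main() {
        // One head block at index 0 followed by two tails: 0b00_10_10_01.
        meta := []uint8{blockStateHead<<0 | blockStateTail<<2 | blockStateTail<<4}
        for i := uintptr(0); i < 4; i++ {
            fmt.Println(i, stateOf(meta, i)) // prints: 0 1, 1 2, 2 2, 3 0
        }
    }
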
 //go:linkname getsp llgo.stackSave
 func getsp() unsafe.Pointer
 
-func printlnAndPanic(c string) {
-	println(c)
-	panic("")
-}
+// When executing initGC(), we must ensure there are no allocations at all;
+// use go:linkname here to avoid importing clite.
+//
+//go:linkname memset C.memset
+func memset(unsafe.Pointer, int, uintptr) unsafe.Pointer
+
+//go:linkname memcpy C.memcpy
+func memcpy(unsafe.Pointer, unsafe.Pointer, uintptr)
+
+//go:linkname _heapStart _heapStart
+var _heapStart [0]byte
+
+//go:linkname _heapEnd _heapEnd
+var _heapEnd [0]byte
+
+//go:linkname _stackStart _stack_top
+var _stackStart [0]byte
+
+//go:linkname _globals_start _globals_start
+var _globals_start [0]byte
+
+//go:linkname _globals_end _globals_end
+var _globals_end [0]byte
+
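Note: the `link: --wrap=malloc --wrap=realloc --wrap=calloc` directive above relies on GNU ld's --wrap flag: every undefined reference to malloc is rewritten to __wrap_malloc (defined below), while the original libc symbol stays reachable as __real_malloc. A hypothetical sketch, not part of this patch, of how the real allocator could still be reached if some memory must live outside the GC heap (realMalloc and allocOutsideGC are illustrative names):

    //go:linkname realMalloc C.__real_malloc
    func realMalloc(size uintptr) unsafe.Pointer

    // allocOutsideGC returns memory the collector will never free,
    // because it comes from libc rather than the GC-managed heap.
    func allocOutsideGC(size uintptr) unsafe.Pointer {
        return realMalloc(size) // bypasses the __wrap_malloc redirection
    }
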
+// Since we don't have an init() function, these are initialized by initGC(),
+// which lazyInit() runs before the first allocation.
 var (
+	heapStart     uintptr        // start address of heap area
+	heapEnd       uintptr        // end address of heap area
+	globalsStart  uintptr        // start address of global variable area
+	globalsEnd    uintptr        // end address of global variable area
+	stackTop      uintptr        // the top of stack
+	endBlock      uintptr        // GC end block index
+	metadataStart unsafe.Pointer // start address of GC metadata
+	isGCInit      bool
+
 	nextAlloc uintptr // the next block that should be tried by the allocator
-	endBlock  uintptr // the block just past the end of the available space
 	gcTotalAlloc  uint64 // total number of bytes allocated
 	gcTotalBlocks uint64 // total number of allocated blocks
 	gcMallocs     uint64 // total number of allocations
@@ -77,24 +99,61 @@ var (
 	zeroSizedAlloc uint8
 )
 
-// blockState stores the four states in which a block can be. It is two bits in
-// size.
-type blockState uint8
+// Some globals + constants for the entire GC.
 
-// The byte value of a block where every block is a 'tail' block.
-const blockStateByteAllTails = 0 |
-	uint8(blockStateTail<<(stateBits*3)) |
-	uint8(blockStateTail<<(stateBits*2)) |
-	uint8(blockStateTail<<(stateBits*1)) |
-	uint8(blockStateTail<<(stateBits*0))
+const (
+	wordsPerBlock      = 4 // number of pointers in an allocated block
+	bytesPerBlock      = wordsPerBlock * unsafe.Sizeof(heapStart)
+	stateBits          = 2 // how many bits a block state takes (see blockState type)
+	blocksPerStateByte = 8 / stateBits
+	markStackSize      = 8 * unsafe.Sizeof((*int)(nil)) // number of to-be-marked blocks to queue before forcing a rescan
+)
+
+//export __wrap_malloc
+func __wrap_malloc(size uintptr) unsafe.Pointer {
+	return Alloc(size)
+}
+
+//export __wrap_calloc
+func __wrap_calloc(size uintptr) unsafe.Pointer {
+	return Alloc(size)
+}
+
+//export __wrap_realloc
+func __wrap_realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
+	return Realloc(ptr, size)
+}
+
+// This function MUST run first, before anything else in the runtime allocates.
+func initGC() {
+	// reserve 2KB at the bottom of the heap for libc's internal malloc;
+	// calls made inside libc itself never hit the --wrap redirection
+	heapStart = uintptr(unsafe.Pointer(&_heapStart)) + 2048
+	heapEnd = uintptr(unsafe.Pointer(&_heapEnd))
+	globalsStart = uintptr(unsafe.Pointer(&_globals_start))
+	globalsEnd = uintptr(unsafe.Pointer(&_globals_end))
+	totalSize := heapEnd - heapStart
+	metadataSize := (totalSize + blocksPerStateByte*bytesPerBlock) / (1 + blocksPerStateByte*bytesPerBlock)
+	metadataStart = unsafe.Pointer(heapEnd - metadataSize)
+	endBlock = (uintptr(metadataStart) - heapStart) / bytesPerBlock
+	stackTop = uintptr(unsafe.Pointer(&_stackStart))
+
+	memset(metadataStart, 0, metadataSize)
+}
+
+func lazyInit() {
+	if !isGCInit {
+		initGC()
+		isGCInit = true
+	}
+}
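Note: the metadataSize division in initGC comes from solving metadataSize >= blocks / blocksPerStateByte, where blocks = (totalSize - metadataSize) / bytesPerBlock, rounded up so the metadata always covers every block. A worked check with assumed numbers (32-bit target, so 16-byte blocks, and a 64 KiB heap):

    package main

    import "fmt"

    func main() {
        const (
            bytesPerBlock      = 16 // 4 words of 4 bytes (assumed 32-bit target)
            blocksPerStateByte = 4  // one metadata byte tracks 4 blocks
            totalSize          = 64 * 1024
        )
        metadataSize := (totalSize + blocksPerStateByte*bytesPerBlock) /
            (1 + blocksPerStateByte*bytesPerBlock)
        endBlock := (totalSize - metadataSize) / bytesPerBlock
        fmt.Println(metadataSize, endBlock)                      // 1009 4032
        fmt.Println(endBlock <= metadataSize*blocksPerStateByte) // true: every block has a state field
    }
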
 
 // blockFromAddr returns a block given an address somewhere in the heap (which
 // might not be heap-aligned).
 func blockFromAddr(addr uintptr) uintptr {
-	if addr < memory.HeapStart || addr >= uintptr(memory.MetadataStart) {
-		printlnAndPanic("gc: trying to get block from invalid address")
+	if addr < heapStart || addr >= uintptr(metadataStart) {
+		println("gc: trying to get block from invalid address")
 	}
-	return (addr - memory.HeapStart) / bytesPerBlock
+	return (addr - heapStart) / bytesPerBlock
 }
 
 // Return a pointer to the start of the allocated object.
@@ -104,9 +163,9 @@ func gcPointerOf(blockAddr uintptr) unsafe.Pointer {
 
 // Return the address of the start of the allocated object.
 func gcAddressOf(blockAddr uintptr) uintptr {
-	addr := memory.HeapStart + blockAddr*bytesPerBlock
-	if addr > uintptr(memory.MetadataStart) {
-		printlnAndPanic("gc: block pointing inside metadata")
+	addr := heapStart + blockAddr*bytesPerBlock
+	if addr > uintptr(metadataStart) {
+		println("gc: block pointing inside metadata")
 	}
 	return addr
 }
@@ -137,7 +196,7 @@ func gcFindHead(blockAddr uintptr) uintptr {
 		blockAddr--
 	}
 	if gcStateOf(blockAddr) != blockStateHead && gcStateOf(blockAddr) != blockStateMark {
-		printlnAndPanic("gc: found tail without head")
+		println("gc: found tail without head")
 	}
 	return blockAddr
 }
@@ -148,14 +207,14 @@ func gcFindNext(blockAddr uintptr) uintptr {
 	if gcStateOf(blockAddr) == blockStateHead || gcStateOf(blockAddr) == blockStateMark {
 		blockAddr++
 	}
-	for gcAddressOf(blockAddr) < uintptr(memory.MetadataStart) && gcStateOf(blockAddr) == blockStateTail {
+	for gcAddressOf(blockAddr) < uintptr(metadataStart) && gcStateOf(blockAddr) == blockStateTail {
 		blockAddr++
 	}
 	return blockAddr
 }
 
 func gcStateByteOf(blockAddr uintptr) byte {
-	return *(*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+	return *(*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
 }
 
 // Return the block state given a state byte. The state byte must have been
@@ -173,19 +232,19 @@ func gcStateOf(blockAddr uintptr) uint8 {
 // bits than the current state. Allowed transitions: from free to any state and
 // from head to mark.
 func gcSetState(blockAddr uintptr, newState uint8) {
-	stateBytePtr := (*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+	stateBytePtr := (*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
 	*stateBytePtr |= uint8(newState << ((blockAddr % blocksPerStateByte) * stateBits))
 	if gcStateOf(blockAddr) != newState {
-		printlnAndPanic("gc: setState() was not successful")
+		println("gc: setState() was not successful")
 	}
 }
 
 // markFree sets the block state to free, no matter what state it was in before.
 func gcMarkFree(blockAddr uintptr) {
-	stateBytePtr := (*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+	stateBytePtr := (*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
 	*stateBytePtr &^= uint8(blockStateMask << ((blockAddr % blocksPerStateByte) * stateBits))
 	if gcStateOf(blockAddr) != blockStateFree {
-		printlnAndPanic("gc: markFree() was not successful")
+		println("gc: markFree() was not successful")
 	}
 	*(*[wordsPerBlock]uintptr)(unsafe.Pointer(gcAddressOf(blockAddr))) = [wordsPerBlock]uintptr{}
 }
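Note: gcFindHead and gcFindNext are what make interior pointers work: an address that lands on a tail block is walked back to the head of its object. A simplified, runnable model of that walk (one state per byte here instead of the packed encoding, for readability):

    package main

    import "fmt"

    const (
        free uint8 = 0
        head uint8 = 1
        tail uint8 = 2
    )

    // findHead mirrors gcFindHead: step left while on a tail block.
    func findHead(states []uint8, b int) int {
        for states[b] == tail {
            b--
        }
        return b
    }

    func main() {
        // A 5-block heap with one 3-block object occupying blocks 1-3.
        states := []uint8{free, head, tail, tail, free}
        fmt.Println(findHead(states, 3)) // 1: an interior pointer resolves to the object's head
    }
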
@@ -194,25 +253,27 @@ func gcMarkFree(blockAddr uintptr) {
 // before calling this function.
 func gcUnmark(blockAddr uintptr) {
 	if gcStateOf(blockAddr) != blockStateMark {
-		printlnAndPanic("gc: unmark() on a block that is not marked")
+		println("gc: unmark() on a block that is not marked")
 	}
 	clearMask := blockStateMask ^ blockStateHead // the bits to clear from the state
-	stateBytePtr := (*uint8)(unsafe.Add(memory.MetadataStart, blockAddr/blocksPerStateByte))
+	stateBytePtr := (*uint8)(unsafe.Add(metadataStart, blockAddr/blocksPerStateByte))
 	*stateBytePtr &^= uint8(clearMask << ((blockAddr % blocksPerStateByte) * stateBits))
 	if gcStateOf(blockAddr) != blockStateHead {
-		printlnAndPanic("gc: unmark() was not successful")
+		println("gc: unmark() was not successful")
 	}
 }
 
 func isOnHeap(ptr uintptr) bool {
-	return ptr >= memory.HeapStart && ptr < uintptr(memory.MetadataStart)
+	return ptr >= heapStart && ptr < uintptr(metadataStart)
 }
 
 // alloc tries to find some free space on the heap, possibly doing a garbage
 // collection cycle if needed. If no space is free, it panics.
 //
 //go:noinline
-func alloc(size uintptr) unsafe.Pointer {
+func Alloc(size uintptr) unsafe.Pointer {
+	lazyInit()
+
 	if size == 0 {
 		return unsafe.Pointer(&zeroSizedAlloc)
 	}
@@ -237,7 +298,7 @@
 			// free memory and try again.
 			heapScanCount = 2
 			freeBytes := GC()
-			heapSize := uintptr(memory.MetadataStart) - memory.HeapStart
+			heapSize := uintptr(metadataStart) - heapStart
 			if freeBytes < heapSize/3 {
 				// Ensure there is at least 33% headroom.
 				// This percentage was arbitrarily chosen, and may need to
@@ -254,13 +315,13 @@
 					// Unfortunately the heap could not be increased. This
 					// happens on baremetal systems for example (where all
 					// available RAM has already been dedicated to the heap).
-					printlnAndPanic("out of memory")
+					println("out of memory")
 				}
 			}
 		}
 
 		// Wrap around the end of the heap.
-		if index == memory.EndBlock {
+		if index == endBlock {
 			index = 0
 			// Reset numFreeBlocks as allocations cannot wrap.
 			numFreeBlocks = 0
@@ -296,14 +357,15 @@
 			}
 
 			// Return a pointer to this allocation.
-			return gcPointerOf(thisAlloc)
+			return memset(gcPointerOf(thisAlloc), 0, size)
 		}
 	}
 }
 
-func realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
+func Realloc(ptr unsafe.Pointer, size uintptr) unsafe.Pointer {
+	lazyInit()
 	if ptr == nil {
-		return alloc(size)
+		return Alloc(size)
 	}
 
 	ptrAddress := uintptr(ptr)
@@ -316,8 +378,8 @@
 		return ptr
 	}
 
-	newAlloc := alloc(size)
-	c.Memcpy(newAlloc, ptr, oldSize)
+	newAlloc := Alloc(size)
+	memcpy(newAlloc, ptr, oldSize)
 	free(ptr)
 
 	return newAlloc
@@ -331,6 +393,8 @@ func free(ptr unsafe.Pointer) {
 // of the runtime.GC() function. The difference is that it returns the number of
 // free bytes in the heap after the GC is finished.
 func GC() (freeBytes uintptr) {
+	lazyInit()
+
 	if gcDebug {
 		println("running collection cycle...")
 	}
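Note: this is a conservative collector: markRoots (next hunk) walks every aligned word between two addresses and treats any value that happens to fall inside [heapStart, metadataStart) as a live pointer. A simplified sketch of that inner loop, reusing this file's own isOnHeap and markRoot (the real code also trims end first, as shown below, so the final read never crosses the range):

    // simplified model of markRoots' scan, not the patch's exact code
    func scanRange(start, end uintptr) {
        for addr := start; addr < end; addr += unsafe.Alignof(addr) {
            word := *(*uintptr)(unsafe.Pointer(addr))
            if isOnHeap(word) { // heapStart <= word < metadataStart
                markRoot(addr, word)
            }
        }
    }
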
@@ -356,19 +420,9 @@
 // well (recursively). The start and end parameters must be valid pointers and
 // must be aligned.
 func markRoots(start, end uintptr) {
-
-	if true {
-		if start >= end {
-			printlnAndPanic("gc: unexpected range to mark")
-		}
-		if start%unsafe.Alignof(start) != 0 {
-			printlnAndPanic("gc: unaligned start pointer")
-		}
-		if end%unsafe.Alignof(end) != 0 {
-			printlnAndPanic("gc: unaligned end pointer")
-		}
+	if start >= end {
+		println("gc: unexpected range to mark")
 	}
-
 	// Reduce the end bound to avoid reading too far on platforms where pointer alignment is smaller than pointer size.
 	// If the size of the range is 0, then end will be slightly below start after this.
 	end -= unsafe.Sizeof(end) - unsafe.Alignof(end)
@@ -419,11 +473,8 @@
 	}
 
 	// Mark block.
-
 	gcSetState(referencedBlock, blockStateMark)
-	println("mark: %lx from %lx", gcPointerOf(referencedBlock), gcPointerOf(root))
-
 	if stackLen == len(stack) {
 		// The stack is full.
 		// It is necessary to rescan all marked blocks once we are done.
@@ -446,7 +497,7 @@ func finishMark() {
 	for markStackOverflow {
 		// Re-mark all blocks.
 		markStackOverflow = false
-		for block := uintptr(0); block < memory.EndBlock; block++ {
+		for block := uintptr(0); block < endBlock; block++ {
 			if gcStateOf(block) != blockStateMark {
 				// Block is not marked, so we do not need to rescan it.
 				continue
@@ -461,7 +512,6 @@
 // mark a GC root at the address addr.
 func markRoot(addr, root uintptr) {
 	if isOnHeap(root) {
-		println("on the heap: %lx", gcPointerOf(root))
 		block := blockFromAddr(root)
 		if gcStateOf(block) == blockStateFree {
 			// The to-be-marked object doesn't actually exist.
@@ -477,14 +527,13 @@
 	}
 }
 
 // Sweep goes through all memory and frees unmarked memory.
 // It returns how many bytes are free in the heap after the sweep.
 func sweep() (freeBytes uintptr) {
 	freeCurrentObject := false
 	var freed uint64
-	var from uintptr
-	for block := uintptr(0); block < memory.EndBlock; block++ {
+	for block := uintptr(0); block < endBlock; block++ {
 		switch gcStateOf(block) {
 		case blockStateHead:
 			// Unmarked head. Free it, including all tail blocks following it.
 			gcMarkFree(block)
 			freeCurrentObject = true
 			gcFrees++
 			freed++
-			from = block
 		case blockStateTail:
 			if freeCurrentObject {
 				// This is a tail object following an unmarked head.
 				gcMarkFree(block)
 				freed++
 			}
-			println("free from %lx to %lx", gcPointerOf(from), gcPointerOf(block))
 		case blockStateMark:
 			// This is a marked object. The next tail blocks must not be freed,
 			// but the mark bit must be removed so the next GC cycle will
@@ -524,12 +571,9 @@ func growHeap() bool {
 }
 
 func gcMarkReachable() {
-	// a compiler trick to get current SP
-	println("scan stack", unsafe.Pointer(getsp()), unsafe.Pointer(memory.StackTop))
-	markRoots(uintptr(getsp()), memory.StackTop)
-	println("scan global", unsafe.Pointer(memory.GlobalsStart), unsafe.Pointer(memory.GlobalsEnd))
-
-	markRoots(memory.GlobalsStart, memory.GlobalsEnd)
+	println("scan stack", getsp(), unsafe.Pointer(stackTop))
+	markRoots(uintptr(getsp()), stackTop)
+	markRoots(globalsStart, globalsEnd)
 }
 
 func gcResumeWorld() {
diff --git a/runtime/internal/runtime/tinygogc/memory/memory.go b/runtime/internal/runtime/tinygogc/memory/memory.go
deleted file mode 100644
index 80312784..00000000
--- a/runtime/internal/runtime/tinygogc/memory/memory.go
+++ /dev/null
@@ -1,71 +0,0 @@
-//go:build baremetal
-
-package memory
-
-import "unsafe"
-
-// no init function, we don't want to init this twice
-const LLGoPackage = "noinit"
-
-//go:linkname _heapStart _heapStart
-var _heapStart [0]byte
-
-//go:linkname _heapEnd _heapEnd
-var _heapEnd [0]byte
-
-//go:linkname _stackStart _stack_top
-var _stackStart [0]byte
-
-//go:linkname _globals_start _globals_start
-var _globals_start [0]byte
-
-//go:linkname _globals_end _globals_end
-var _globals_end [0]byte
-
-// since we don't have an init() function, these should be initalized by initHeap(), which is called by entry
-var (
-	HeapStart     uintptr        // start address of heap area
-	HeapEnd       uintptr        // end address of heap area
-	GlobalsStart  uintptr        // start address of global variable area
-	GlobalsEnd    uintptr        // end address of global variable area
-	StackTop      uintptr        // the top of stack
-	EndBlock      uintptr        // GC end block index
-	MetadataStart unsafe.Pointer // start address of GC metadata
-)
-
-// Some globals + constants for the entire GC.
-
-const (
-	wordsPerBlock      = 4 // number of pointers in an allocated block
-	bytesPerBlock      = wordsPerBlock * unsafe.Sizeof(HeapStart)
-	stateBits          = 2 // how many bits a block state takes (see blockState type)
-	blocksPerStateByte = 8 / stateBits
-	markStackSize      = 8 * unsafe.Sizeof((*int)(nil)) // number of to-be-marked blocks to queue before forcing a rescan
-)
-
-// zeroSizedAlloc is just a sentinel that gets returned when allocating 0 bytes.
-var zeroSizedAlloc uint8
-
-// when executing initGC(), we must ensure there's no any allocations.
-// use linking here to avoid import clite
-//
-//go:linkname memset C.memset
-func memset(unsafe.Pointer, int, uintptr)
-
-// this function MUST be initalized first, which means it's required to be initalized before runtime
-//
-//export initGC
-func initGC() {
-	// reserve 2K blocks for malloc
-	HeapStart = uintptr(unsafe.Pointer(&_heapStart)) + 2048
-	HeapEnd = uintptr(unsafe.Pointer(&_heapEnd))
-	GlobalsStart = uintptr(unsafe.Pointer(&_globals_start))
-	GlobalsEnd = uintptr(unsafe.Pointer(&_globals_end))
-	totalSize := HeapEnd - HeapStart
-	metadataSize := (totalSize + blocksPerStateByte*bytesPerBlock) / (1 + blocksPerStateByte*bytesPerBlock)
-	MetadataStart = unsafe.Pointer(HeapEnd - metadataSize)
-	EndBlock = (uintptr(MetadataStart) - HeapStart) / bytesPerBlock
-	StackTop = uintptr(unsafe.Pointer(&_stackStart))
-
-	memset(MetadataStart, 0, metadataSize)
-}
diff --git a/runtime/internal/runtime/z_gc.go b/runtime/internal/runtime/z_gc.go
index 8bb820be..5669667b 100644
--- a/runtime/internal/runtime/z_gc.go
+++ b/runtime/internal/runtime/z_gc.go
@@ -23,6 +23,7 @@ import (
 
 	c "github.com/goplus/llgo/runtime/internal/clite"
 	"github.com/goplus/llgo/runtime/internal/clite/bdwgc"
+	_ "github.com/goplus/llgo/runtime/internal/runtime/bdwgc"
 )
 
 // AllocU allocates uninitialized memory.
diff --git a/runtime/internal/runtime/z_gc_baremetal.go b/runtime/internal/runtime/z_gc_baremetal.go
index b3e4d47a..a2d3115c 100644
--- a/runtime/internal/runtime/z_gc_baremetal.go
+++ b/runtime/internal/runtime/z_gc_baremetal.go
@@ -21,16 +21,15 @@ package runtime
 import (
 	"unsafe"
 
-	c "github.com/goplus/llgo/runtime/internal/clite"
+	"github.com/goplus/llgo/runtime/internal/runtime/tinygogc"
 )
 
 // AllocU allocates uninitialized memory.
 func AllocU(size uintptr) unsafe.Pointer {
-	return alloc(size)
+	return tinygogc.Alloc(size)
 }
 
 // AllocZ allocates zero-initialized memory.
 func AllocZ(size uintptr) unsafe.Pointer {
-	ptr := alloc(size)
-	return c.Memset(ptr, 0, size)
+	return tinygogc.Alloc(size)
 }
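Note: AllocU and AllocZ can now share one implementation because tinygogc.Alloc ends with memset(gcPointerOf(thisAlloc), 0, size), so even the "uninitialized" entry point hands back zeroed memory. A hypothetical spot-check of that invariant (assertZeroed is an illustrative helper, not in the patch):

    // assertZeroed verifies the new AllocU contract on one allocation.
    func assertZeroed(n uintptr) bool {
        p := AllocU(n)
        for i := uintptr(0); i < n; i++ {
            if *(*byte)(unsafe.Add(p, i)) != 0 {
                return false
            }
        }
        return true
    }
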
diff --git a/targets/esp32-riscv.app.elf.ld b/targets/esp32-riscv.app.elf.ld
index a7c7d623..d6cd85d5 100644
--- a/targets/esp32-riscv.app.elf.ld
+++ b/targets/esp32-riscv.app.elf.ld
@@ -92,6 +92,12 @@ SECTIONS
     _iram_end = .;
   } > iram_seg
 
+  .stack (NOLOAD) :
+  {
+    . += 16K;
+    __stack = .;
+  } > dram_seg
+
   /**
    * This section is required to skip .iram0.text area because iram0_0_seg and
    * dram0_0_seg reflect the same address space on different buses.
@@ -102,14 +108,6 @@
     . += ORIGIN(iram_seg) == ORIGIN(dram_seg) ? 0 : _iram_end - _iram_start;
   } > dram_seg
 
-  .stack (NOLOAD) :
-  {
-    __stack_end = .;
-    . = ALIGN(16);
-    . += 16K;
-    __stack = .;
-  }
-
   .data :
   {
     _data_start = .;
diff --git a/targets/esp32.app.elf.ld b/targets/esp32.app.elf.ld
index 071b43d0..e3c27fdc 100755
--- a/targets/esp32.app.elf.ld
+++ b/targets/esp32.app.elf.ld
@@ -27,7 +27,6 @@ SECTIONS
 
   .stack (NOLOAD) :
   {
-    __stack_end = .;
     . = ALIGN(16);
     . += 16K;
     __stack = .;
@@ -176,3 +175,108 @@ _globals_start = _data_start;
 _globals_end = _end;
 _heapStart = _end;
 _stack_top = __stack;
+
+
+/* From ESP-IDF:
+ * components/esp_rom/esp32/ld/esp32.rom.newlib-funcs.ld
+ * This is the subset that is sometimes used by LLVM during codegen, and thus
+ * must always be present.
+ */
+memcpy = 0x4000c2c8;
+memmove = 0x4000c3c0;
+memset = 0x4000c44c;
+
+/* From ESP-IDF:
+ * components/esp_rom/esp32/ld/esp32.rom.libgcc.ld
+ * These are called from LLVM during codegen. The original license is Apache
+ * 2.0, but I believe that a list of function names and addresses can't really
+ * be copyrighted.
+ */
+__absvdi2 = 0x4006387c;
+__absvsi2 = 0x40063868;
+__adddf3 = 0x40002590;
+__addsf3 = 0x400020e8;
+__addvdi3 = 0x40002cbc;
+__addvsi3 = 0x40002c98;
+__ashldi3 = 0x4000c818;
+__ashrdi3 = 0x4000c830;
+__bswapdi2 = 0x40064b08;
+__bswapsi2 = 0x40064ae0;
+__clrsbdi2 = 0x40064b7c;
+__clrsbsi2 = 0x40064b64;
+__clzdi2 = 0x4000ca50;
+__clzsi2 = 0x4000c7e8;
+__cmpdi2 = 0x40063820;
+__ctzdi2 = 0x4000ca64;
+__ctzsi2 = 0x4000c7f0;
+__divdc3 = 0x400645a4;
+__divdf3 = 0x40002954;
+__divdi3 = 0x4000ca84;
+__divsi3 = 0x4000c7b8;
+__eqdf2 = 0x400636a8;
+__eqsf2 = 0x40063374;
+__extendsfdf2 = 0x40002c34;
+__ffsdi2 = 0x4000ca2c;
+__ffssi2 = 0x4000c804;
+__fixdfdi = 0x40002ac4;
+__fixdfsi = 0x40002a78;
+__fixsfdi = 0x4000244c;
+__fixsfsi = 0x4000240c;
+__fixunsdfsi = 0x40002b30;
+__fixunssfdi = 0x40002504;
+__fixunssfsi = 0x400024ac;
+__floatdidf = 0x4000c988;
+__floatdisf = 0x4000c8c0;
+__floatsidf = 0x4000c944;
+__floatsisf = 0x4000c870;
+__floatundidf = 0x4000c978;
+__floatundisf = 0x4000c8b0;
+__floatunsidf = 0x4000c938;
+__floatunsisf = 0x4000c864;
+__gcc_bcmp = 0x40064a70;
+__gedf2 = 0x40063768;
+__gesf2 = 0x4006340c;
+__gtdf2 = 0x400636dc;
+__gtsf2 = 0x400633a0;
+__ledf2 = 0x40063704;
+__lesf2 = 0x400633c0;
+__lshrdi3 = 0x4000c84c;
+__ltdf2 = 0x40063790;
+__ltsf2 = 0x4006342c;
+__moddi3 = 0x4000cd4c;
+__modsi3 = 0x4000c7c0;
+__muldc3 = 0x40063c90;
+__muldf3 = 0x4006358c;
+__muldi3 = 0x4000c9fc;
+__mulsf3 = 0x400632c8;
+__mulsi3 = 0x4000c7b0;
+__mulvdi3 = 0x40002d78;
+__mulvsi3 = 0x40002d60;
+__nedf2 = 0x400636a8;
+__negdf2 = 0x400634a0;
+__negdi2 = 0x4000ca14;
+__negsf2 = 0x400020c0;
+__negvdi2 = 0x40002e98;
+__negvsi2 = 0x40002e78;
+__nesf2 = 0x40063374;
+__nsau_data = 0x3ff96544;
+__paritysi2 = 0x40002f3c;
+__popcount_tab = 0x3ff96544;
+__popcountdi2 = 0x40002ef8;
+__popcountsi2 = 0x40002ed0;
+__powidf2 = 0x400638e4;
+__subdf3 = 0x400026e4;
+__subsf3 = 0x400021d0;
+__subvdi3 = 0x40002d20;
+__subvsi3 = 0x40002cf8;
+__truncdfsf2 = 0x40002b90;
+__ucmpdi2 = 0x40063840;
+__udiv_w_sdiv = 0x40064bec;
+__udivdi3 = 0x4000cff8;
+__udivmoddi4 = 0x40064bf4;
+__udivsi3 = 0x4000c7c8;
+__umoddi3 = 0x4000d280;
+__umodsi3 = 0x4000c7d0;
+__umulsidi3 = 0x4000c7d8;
+__unorddf2 = 0x400637f4;
+__unordsf2 = 0x40063478;
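Note: these assignments pin the newlib and libgcc helpers that LLVM may emit calls to (memset, __divdi3, the soft-float routines, and so on) at fixed addresses in the ESP32 mask ROM, so no implementation has to be linked into the image. They also resolve the `//go:linkname memset C.memset` binding in gc_tinygo.go above to the ROM routine at 0x4000c44c. A hypothetical on-device sanity check of that binding (illustrative only, not part of the patch, and it would clash with the existing memset linkname if placed in the same package):

    //go:linkname memsetSym C.memset
    var memsetSym [0]byte // taking its address yields the linker-resolved symbol address

    func memsetIsInROM() bool {
        return uintptr(unsafe.Pointer(&memsetSym)) == 0x4000c44c
    }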