package runtime
|
|
|
|
import (
|
|
"internal/goarch"
|
|
"internal/goos"
|
|
)
|
|
|
|
// Goexit is declared for API compatibility with the standard runtime,
// where it terminates the goroutine that calls it.
//
// Not yet implemented in this runtime: calling it always panics.
func Goexit() {
	panic("todo: runtime.Goexit")
}
// KeepAlive is declared for API compatibility with the standard
// runtime, where it marks its argument as reachable.
//
// Not yet implemented in this runtime: calling it always panics.
func KeepAlive(x any) {
	panic("todo: runtime.KeepAlive")
}
// SetFinalizer is declared for API compatibility with the standard
// runtime, where it associates a finalizer with obj.
//
// Not yet implemented in this runtime: calling it always panics.
func SetFinalizer(obj any, finalizer any) {
	panic("todo: runtime.SetFinalizer")
}
// GOOS is the running program's operating system target:
// one of darwin, freebsd, linux, and so on.
// To view possible combinations of GOOS and GOARCH, run "go tool dist list".
//
// The value is supplied at build time by internal/goos.
const GOOS string = goos.GOOS
// GOARCH is the running program's architecture target:
// one of 386, amd64, arm, s390x, and so on.
//
// The value is supplied at build time by internal/goarch.
const GOARCH string = goarch.GOARCH
// Compiler is the name of the compiler toolchain that built the
// running binary. Known toolchains are:
//
//	gc      Also known as cmd/compile.
//	gccgo   The gccgo front end, part of the GCC compiler suite.
//	llgo    The LLVM-based Go compiler this runtime belongs to.
const Compiler = "llgo"
// GOMAXPROCS is declared for API compatibility with the standard
// runtime, where it sets the maximum number of CPUs executing
// simultaneously and returns the previous setting.
//
// Not yet implemented in this runtime: calling it always panics.
func GOMAXPROCS(n int) int {
	panic("todo: runtime.GOMAXPROCS")
}
// GC is declared for API compatibility with the standard runtime,
// where it runs a garbage collection and blocks until it completes.
//
// Not yet implemented in this runtime: calling it always panics.
func GC() {
	panic("todo: runtime.GC")
}
// GOROOT is declared for API compatibility with the standard runtime,
// where it returns the root of the Go tree.
//
// Not yet implemented in this runtime: calling it always panics.
func GOROOT() string {
	panic("todo: runtime.GOROOT")
}
// Caller is declared for API compatibility with the standard runtime,
// where it reports file and line number information about function
// invocations on the calling goroutine's stack.
//
// Not yet implemented in this runtime: calling it always panics.
func Caller(skip int) (pc uintptr, file string, line int, ok bool) {
	panic("todo: runtime.Caller")
}
// Callers is declared for API compatibility with the standard runtime,
// where it fills pc with return program counters of the calling
// goroutine's stack and reports how many entries were written.
//
// Not yet implemented in this runtime: calling it always panics.
func Callers(skip int, pc []uintptr) int {
	panic("todo: runtime.Callers")
}
// A Func represents a Go function in the running binary.
type Func struct {
	opaque struct{} // unexported field to disallow conversions
}
func (f *Func) Name() string {
|
|
panic("todo")
|
|
}
|
|
|
|
func FuncForPC(pc uintptr) *Func {
|
|
panic("todo")
|
|
}
|
|
|
|
// nih is the unexported marker wrapped by NotInHeap; because it is
// unexported, the not-in-heap property cannot be forged by embedding
// from outside this package.
type nih struct{}
// NotInHeap can be embedded into a type to mark it as not allocatable
// on the garbage-collected heap (mirrors the standard runtime's
// internal NotInHeap marker).
type NotInHeap struct{ _ nih }
// A FuncID identifies particular runtime functions that need special
// treatment; it is set for certain special functions (see _func.funcID).
type FuncID uint8
// A FuncFlag holds bits describing properties of a function
// (see _func.flag); the individual bit meanings mirror the standard
// runtime's funcFlag — TODO confirm against the linker's emitted data.
type FuncFlag uint8
// _func mirrors the per-function metadata record emitted by the Go
// toolchain (the standard runtime's _func). The field order and sizes
// are layout-sensitive (note the trailing alignment requirement on
// nfuncdata), so only comments may be changed here.
type _func struct {
	NotInHeap // Only in static data

	entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
	nameOff  int32  // function name, as index into moduledata.funcnametab.

	args        int32  // in/out args size
	deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.

	// pcsp/pcfile/pcln/npcdata are pc-value table offsets; presumably
	// indices into the module's pctab as in the standard runtime —
	// TODO confirm against the llgo linker.
	pcsp      uint32
	pcfile    uint32
	pcln      uint32
	npcdata   uint32
	cuOffset  uint32 // runtime.cutab offset of this function's CU
	startLine int32  // line number of start of function (func keyword/TEXT directive)
	funcID    FuncID // set for certain special runtime functions
	flag      FuncFlag
	_         [1]byte // pad
	nfuncdata uint8   // must be last, must end on a uint32-aligned boundary
}
// moduledata records information about a loaded module. The standard
// runtime keeps extensive per-module tables here; this runtime does
// not populate any of them yet, so the struct is currently an empty
// placeholder.
type moduledata struct{}
// funcInfo pairs a _func metadata record with the moduledata it
// belongs to.
type funcInfo struct {
	*_func
	datap *moduledata
}
// Frame is the information returned by Frames for each call frame.
// It mirrors the standard runtime's Frame type.
type Frame struct {
	// PC is the program counter for the location in this frame.
	PC uintptr

	// Func is the Func value of this call frame; may be nil
	// (e.g. for non-Go code) per the standard runtime contract.
	Func *Func

	// Function is the package path-qualified function name of
	// this call frame, if available.
	Function string

	// File is the file name of the location in this frame.
	File string

	// Line is the line number of the location in this frame.
	Line int

	// startLine is the line number of the beginning of the
	// function in this frame.
	startLine int

	// Entry is the entry address of the function in this frame.
	Entry uintptr

	// funcInfo is the runtime metadata backing this frame.
	funcInfo funcInfo
}
// Frames may be used to get function/file/line information for a
// slice of PC values returned by Callers. It mirrors the standard
// runtime's Frames type.
type Frames struct {
	// callers holds the PCs that have not yet been expanded to frames.
	callers []uintptr

	// nextPC is a next PC to expand ahead of processing callers
	// — presumably used for inline expansion as in the standard
	// runtime; TODO confirm once Next is implemented.
	nextPC uintptr

	// frames holds Frames that have been expanded but not yet
	// returned; frameStore is its initial backing storage to
	// avoid a heap allocation in the common case.
	frames     []Frame
	frameStore [2]Frame
}
func (ci *Frames) Next() (frame Frame, more bool) {
|
|
panic("todo")
|
|
}
|
|
|
|
func CallersFrames(callers []uintptr) *Frames {
|
|
panic("todo")
|
|
}
|
|
|
|
// Stack is declared for API compatibility with the standard runtime,
// where it formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written.
//
// Not yet implemented in this runtime: calling it always panics.
func Stack(buf []byte, all bool) int {
	panic("todo: runtime.Stack")
}
// Version is declared for API compatibility with the standard runtime,
// where it returns the Go tree's version string.
//
// Not yet implemented in this runtime: calling it always panics.
func Version() string {
	panic("todo: runtime.Version")
}
// A MemStats records statistics about the memory allocator.
//
// The layout and field documentation mirror the standard runtime's
// MemStats so that callers of ReadMemStats compile unchanged.
type MemStats struct {
	// General statistics.

	// Alloc is bytes of allocated heap objects.
	//
	// This is the same as HeapAlloc (see below).
	Alloc uint64

	// TotalAlloc is cumulative bytes allocated for heap objects.
	//
	// TotalAlloc increases as heap objects are allocated, but
	// unlike Alloc and HeapAlloc, it does not decrease when
	// objects are freed.
	TotalAlloc uint64

	// Sys is the total bytes of memory obtained from the OS.
	//
	// Sys is the sum of the XSys fields below. Sys measures the
	// virtual address space reserved by the Go runtime for the
	// heap, stacks, and other internal data structures. It's
	// likely that not all of the virtual address space is backed
	// by physical memory at any given moment, though in general
	// it all was at some point.
	Sys uint64

	// Lookups is the number of pointer lookups performed by the
	// runtime.
	//
	// This is primarily useful for debugging runtime internals.
	Lookups uint64

	// Mallocs is the cumulative count of heap objects allocated.
	// The number of live objects is Mallocs - Frees.
	Mallocs uint64

	// Frees is the cumulative count of heap objects freed.
	Frees uint64

	// Heap memory statistics.
	//
	// Interpreting the heap statistics requires some knowledge of
	// how Go organizes memory. Go divides the virtual address
	// space of the heap into "spans", which are contiguous
	// regions of memory 8K or larger. A span may be in one of
	// three states:
	//
	// An "idle" span contains no objects or other data. The
	// physical memory backing an idle span can be released back
	// to the OS (but the virtual address space never is), or it
	// can be converted into an "in use" or "stack" span.
	//
	// An "in use" span contains at least one heap object and may
	// have free space available to allocate more heap objects.
	//
	// A "stack" span is used for goroutine stacks. Stack spans
	// are not considered part of the heap. A span can change
	// between heap and stack memory; it is never used for both
	// simultaneously.

	// HeapAlloc is bytes of allocated heap objects.
	//
	// "Allocated" heap objects include all reachable objects, as
	// well as unreachable objects that the garbage collector has
	// not yet freed. Specifically, HeapAlloc increases as heap
	// objects are allocated and decreases as the heap is swept
	// and unreachable objects are freed. Sweeping occurs
	// incrementally between GC cycles, so these two processes
	// occur simultaneously, and as a result HeapAlloc tends to
	// change smoothly (in contrast with the sawtooth that is
	// typical of stop-the-world garbage collectors).
	HeapAlloc uint64

	// HeapSys is bytes of heap memory obtained from the OS.
	//
	// HeapSys measures the amount of virtual address space
	// reserved for the heap. This includes virtual address space
	// that has been reserved but not yet used, which consumes no
	// physical memory, but tends to be small, as well as virtual
	// address space for which the physical memory has been
	// returned to the OS after it became unused (see HeapReleased
	// for a measure of the latter).
	//
	// HeapSys estimates the largest size the heap has had.
	HeapSys uint64

	// HeapIdle is bytes in idle (unused) spans.
	//
	// Idle spans have no objects in them. These spans could be
	// (and may already have been) returned to the OS, or they can
	// be reused for heap allocations, or they can be reused as
	// stack memory.
	//
	// HeapIdle minus HeapReleased estimates the amount of memory
	// that could be returned to the OS, but is being retained by
	// the runtime so it can grow the heap without requesting more
	// memory from the OS. If this difference is significantly
	// larger than the heap size, it indicates there was a recent
	// transient spike in live heap size.
	HeapIdle uint64

	// HeapInuse is bytes in in-use spans.
	//
	// In-use spans have at least one object in them. These spans
	// can only be used for other objects of roughly the same
	// size.
	//
	// HeapInuse minus HeapAlloc estimates the amount of memory
	// that has been dedicated to particular size classes, but is
	// not currently being used. This is an upper bound on
	// fragmentation, but in general this memory can be reused
	// efficiently.
	HeapInuse uint64

	// HeapReleased is bytes of physical memory returned to the OS.
	//
	// This counts heap memory from idle spans that was returned
	// to the OS and has not yet been reacquired for the heap.
	HeapReleased uint64

	// HeapObjects is the number of allocated heap objects.
	//
	// Like HeapAlloc, this increases as objects are allocated and
	// decreases as the heap is swept and unreachable objects are
	// freed.
	HeapObjects uint64

	// Stack memory statistics.
	//
	// Stacks are not considered part of the heap, but the runtime
	// can reuse a span of heap memory for stack memory, and
	// vice-versa.

	// StackInuse is bytes in stack spans.
	//
	// In-use stack spans have at least one stack in them. These
	// spans can only be used for other stacks of the same size.
	//
	// There is no StackIdle because unused stack spans are
	// returned to the heap (and hence counted toward HeapIdle).
	StackInuse uint64

	// StackSys is bytes of stack memory obtained from the OS.
	//
	// StackSys is StackInuse, plus any memory obtained directly
	// from the OS for OS thread stacks.
	//
	// In non-cgo programs this metric is currently equal to StackInuse
	// (but this should not be relied upon, and the value may change in
	// the future).
	//
	// In cgo programs this metric includes OS thread stacks allocated
	// directly from the OS. Currently, this only accounts for one stack in
	// c-shared and c-archive build modes and other sources of stacks from
	// the OS (notably, any allocated by C code) are not currently measured.
	// Note this too may change in the future.
	StackSys uint64

	// Off-heap memory statistics.
	//
	// The following statistics measure runtime-internal
	// structures that are not allocated from heap memory (usually
	// because they are part of implementing the heap). Unlike
	// heap or stack memory, any memory allocated to these
	// structures is dedicated to these structures.
	//
	// These are primarily useful for debugging runtime memory
	// overheads.

	// MSpanInuse is bytes of allocated mspan structures.
	MSpanInuse uint64

	// MSpanSys is bytes of memory obtained from the OS for mspan
	// structures.
	MSpanSys uint64

	// MCacheInuse is bytes of allocated mcache structures.
	MCacheInuse uint64

	// MCacheSys is bytes of memory obtained from the OS for
	// mcache structures.
	MCacheSys uint64

	// BuckHashSys is bytes of memory in profiling bucket hash tables.
	BuckHashSys uint64

	// GCSys is bytes of memory in garbage collection metadata.
	GCSys uint64

	// OtherSys is bytes of memory in miscellaneous off-heap
	// runtime allocations.
	OtherSys uint64

	// Garbage collector statistics.

	// NextGC is the target heap size of the next GC cycle.
	//
	// The garbage collector's goal is to keep HeapAlloc ≤ NextGC.
	// At the end of each GC cycle, the target for the next cycle
	// is computed based on the amount of reachable data and the
	// value of GOGC.
	NextGC uint64

	// LastGC is the time the last garbage collection finished, as
	// nanoseconds since 1970 (the UNIX epoch).
	LastGC uint64

	// PauseTotalNs is the cumulative nanoseconds in GC
	// stop-the-world pauses since the program started.
	//
	// During a stop-the-world pause, all goroutines are paused
	// and only the garbage collector can run.
	PauseTotalNs uint64

	// PauseNs is a circular buffer of recent GC stop-the-world
	// pause times in nanoseconds.
	//
	// The most recent pause is at PauseNs[(NumGC+255)%256]. In
	// general, PauseNs[N%256] records the time paused in the most
	// recent N%256th GC cycle. There may be multiple pauses per
	// GC cycle; this is the sum of all pauses during a cycle.
	PauseNs [256]uint64

	// PauseEnd is a circular buffer of recent GC pause end times,
	// as nanoseconds since 1970 (the UNIX epoch).
	//
	// This buffer is filled the same way as PauseNs. There may be
	// multiple pauses per GC cycle; this records the end of the
	// last pause in a cycle.
	PauseEnd [256]uint64

	// NumGC is the number of completed GC cycles.
	NumGC uint32

	// NumForcedGC is the number of GC cycles that were forced by
	// the application calling the GC function.
	NumForcedGC uint32

	// GCCPUFraction is the fraction of this program's available
	// CPU time used by the GC since the program started.
	//
	// GCCPUFraction is expressed as a number between 0 and 1,
	// where 0 means GC has consumed none of this program's CPU. A
	// program's available CPU time is defined as the integral of
	// GOMAXPROCS since the program started. That is, if
	// GOMAXPROCS is 2 and a program has been running for 10
	// seconds, its "available CPU" is 20 seconds. GCCPUFraction
	// does not include CPU time used for write barrier activity.
	//
	// This is the same as the fraction of CPU reported by
	// GODEBUG=gctrace=1.
	GCCPUFraction float64

	// EnableGC indicates that GC is enabled. It is always true,
	// even if GOGC=off.
	EnableGC bool

	// DebugGC is currently unused.
	DebugGC bool

	// BySize reports per-size class allocation statistics.
	//
	// BySize[N] gives statistics for allocations of size S where
	// BySize[N-1].Size < S ≤ BySize[N].Size.
	//
	// This does not report allocations larger than BySize[60].Size.
	BySize [61]struct {
		// Size is the maximum byte size of an object in this
		// size class.
		Size uint32

		// Mallocs is the cumulative count of heap objects
		// allocated in this size class. The cumulative bytes
		// of allocation is Size*Mallocs. The number of live
		// objects in this size class is Mallocs - Frees.
		Mallocs uint64

		// Frees is the cumulative count of heap objects freed
		// in this size class.
		Frees uint64
	}
}
func ReadMemStats(m *MemStats) {
|
|
panic("todo")
|
|
}
|
|
|
|
// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}
// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
|
|
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
|
|
|
|
// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
|
|
func (r *MemProfileRecord) InUseObjects() int64 {
|
|
return r.AllocObjects - r.FreeObjects
|
|
}
|
|
|
|
// MemProfileRate controls the fraction of memory allocations that are
// recorded and reported in the memory profile. Under the standard
// runtime contract the profiler aims to sample an average of one
// allocation per MemProfileRate bytes allocated; the default here,
// 512 KiB, matches the standard runtime's default.
var MemProfileRate int = 512 * 1024
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
|
|
panic("todo")
|
|
}
|
|
|
|
// StartTrace is declared for API compatibility with the standard
// runtime, where it enables tracing for the current process.
//
// Not yet implemented in this runtime: calling it always panics.
func StartTrace() error {
	panic("todo: runtime.StartTrace")
}
// StopTrace is declared for API compatibility with the standard
// runtime, where it stops tracing, if it is enabled.
//
// Not yet implemented in this runtime: calling it always panics.
func StopTrace() {
	panic("todo: runtime.StopTrace")
}
// ReadTrace is declared for API compatibility with the standard
// runtime, where it returns the next chunk of binary tracing data.
//
// Not yet implemented in this runtime: calling it always panics.
func ReadTrace() []byte {
	panic("todo: runtime.ReadTrace")
}
// SetBlockProfileRate is declared for API compatibility with the
// standard runtime, where it controls the fraction of goroutine
// blocking events reported in the blocking profile.
//
// Not yet implemented in this runtime: calling it always panics.
func SetBlockProfileRate(rate int) {
	panic("todo: runtime.SetBlockProfileRate")
}
// SetMutexProfileFraction is declared for API compatibility with the
// standard runtime, where it controls the fraction of mutex contention
// events reported in the mutex profile and returns the previous rate.
//
// Not yet implemented in this runtime: calling it always panics.
func SetMutexProfileFraction(rate int) int {
	panic("todo: runtime.SetMutexProfileFraction")
}
// LockOSThread is declared for API compatibility with the standard
// runtime, where it wires the calling goroutine to its current OS thread.
//
// Not yet implemented in this runtime: calling it always panics.
func LockOSThread() {
	panic("todo: runtime.LockOSThread")
}
// UnlockOSThread is declared for API compatibility with the standard
// runtime, where it undoes an earlier call to LockOSThread.
//
// Not yet implemented in this runtime: calling it always panics.
func UnlockOSThread() {
	panic("todo: runtime.UnlockOSThread")
}
// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
|
|
panic("todo")
|
|
}
|
|
|
|
// NumGoroutine is declared for API compatibility with the standard
// runtime, where it returns the number of goroutines that currently exist.
//
// Not yet implemented in this runtime: calling it always panics.
func NumGoroutine() int {
	panic("todo: runtime.NumGoroutine")
}
// SetCPUProfileRate is declared for API compatibility with the
// standard runtime, where it sets the CPU profiling rate to hz
// samples per second.
//
// Not yet implemented in this runtime: calling it always panics.
func SetCPUProfileRate(hz int) {
	panic("todo: runtime.SetCPUProfileRate")
}
// BlockProfileRecord describes blocking events originated at a
// particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
|
|
panic("todo")
|
|
}
|
|
|
|
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
|
|
panic("todo")
|
|
}
|