runtime: map
internal/runtime/malloc.go | 339 lines added | Normal file
@@ -0,0 +1,339 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"unsafe"
)

// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
	return AllocZ(typ.Size_)
}
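
// Illustrative note (added for clarity, not part of the original commit):
// the compiler lowers a source-level new(T) into a call to newobject, so
//
//	p := new(int64)
//
// becomes roughly p := (*int64)(newobject(int64Type)), where int64Type
// stands in for the *_type descriptor of int64. AllocZ is assumed here to
// return zeroed memory, which preserves the zero-value guarantee of new.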

/*
//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}

//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}
*/

const mathMaxUintptr = ^uintptr(0)

// mathMulUintptr returns a * b and whether the multiplication overflowed.
// On supported platforms this is an intrinsic lowered by the compiler.
func mathMulUintptr(a, b uintptr) (uintptr, bool) {
	if a|b < 1<<(4*goarchPtrSize) || a == 0 {
		return a * b, false
	}
	overflow := b > mathMaxUintptr/a
	return a * b, overflow
}
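
// Worked example (added for clarity, not part of the original commit):
// with 8-byte pointers, goarchPtrSize is 8, so the fast path covers any
// a and b below 1<<32, whose product always fits in a uintptr. For
// a = b = 1<<33, the product wraps to 0 and b > mathMaxUintptr/a, so the
// function returns (0, true), signalling overflow.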

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n == 1 {
		return AllocZ(typ.Size_)
	}
	mem, overflow := mathMulUintptr(typ.Size_, uintptr(n))
	if overflow || n < 0 {
		panic(plainError("runtime: allocation size out of range"))
	}
	return AllocZ(mem)
}
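
// Example (added for clarity, not part of the original commit): for an
// element type with Size_ == 8 and n == 4, mathMulUintptr yields (32, false)
// and newarray returns 32 bytes of zeroed memory; a negative n or an
// overflowing product panics with "allocation size out of range".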

/*
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := getMCache(mp)
	if c == nil {
		throw("profilealloc called without a P or outside bootstrapping")
	}
	c.nextSample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows the exponential
// distribution (exp(MemProfileRate)), so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() uintptr {
	if MemProfileRate == 1 {
		// Callers assign our return value to
		// mcache.next_sample, but next_sample is not used
		// when the rate is 1. So avoid the math below and
		// just return something.
		return 0
	}
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if gp := getg(); gp == gp.m.gsignal {
			return nextSampleNoFP()
		}
	}

	return uintptr(fastexprand(MemProfileRate))
}

// fastexprand returns a random number from an exponential distribution with
// the specified mean.
func fastexprand(mean int) int32 {
	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution exp(-x/mean).
	// The probability distribution function is (1/mean)*exp(-x/mean), so the
	// CDF is p = 1 - exp(-x/mean), so
	// q = 1 - p == exp(-x/mean)
	// log_e(q) = -x/mean
	// x = -log_e(q) * mean
	// x = log_2(q) * (-log_e(2)) * mean	; Using log_2 for efficiency
	const randomBitCount = 26
	q := fastrandn(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}
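
// Numerical check (added for clarity, not part of the original commit):
// q is uniform on [1, 1<<26], so u = q/(1<<26) is roughly uniform on (0, 1]
// and -ln(u) is exponentially distributed with mean 1; scaling by mean
// gives the desired sample. At the median, q = 1<<25, qlog = -1, and the
// return value is about 0.693*mean, i.e. mean*ln(2), which is exactly the
// median of an exponential distribution with that mean.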

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() uintptr {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return uintptr(fastrandn(uint32(2 * rate)))
	}
	return 0
}
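
// Note (added for clarity, not part of the original commit): a sample drawn
// uniformly from [0, 2*rate) has mean rate, so this fallback still averages
// roughly one sample per MemProfileRate bytes, just with uniform rather than
// exponential spacing between samples.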

type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// persistentChunkSize is the number of bytes we allocate when we grow
// a persistentAlloc.
const persistentChunkSize = 256 << 10

// persistentChunks is a list of all the persistent chunks we have
// allocated. The list is maintained through the first word in the
// persistent chunk. This is updated atomically.
var persistentChunks *notInHeap
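
// Illustrative layout (added for clarity, not part of the original commit):
// the first word of each chunk stores the address of the chunk allocated
// before it, so the chunks form a singly linked list threaded through the
// chunks themselves:
//
//	persistentChunks -> [newest | ...] -> [older | ...] -> [oldest | 0]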

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
// sysStat must be non-nil.
//
// Consider marking persistentalloc'd types not in heap by embedding
// runtime/internal/sys.NotInHeap.
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}
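
// Usage sketch (added for clarity, not part of the original commit; the
// size and stat are illustrative):
//
//	buf := persistentalloc(1024, 0, &memstats.other_sys)
//
// returns 1024 zeroed, 8-byte-aligned bytes that are never freed and are
// not scanned by the garbage collector.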

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
	const (
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = alignUp(persistent.off, align)
	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}

		// Add the new chunk to the persistentChunks list.
		for {
			chunks := uintptr(unsafe.Pointer(persistentChunks))
			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
				break
			}
		}
		persistent.off = alignUp(goarch.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}

// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
	for chunk != 0 {
		if p >= chunk && p < chunk+persistentChunkSize {
			return true
		}
		chunk = *(*uintptr)(unsafe.Pointer(chunk))
	}
	return false
}

// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then optionally maps that region into the Ready state
// as needed.
//
// The caller is responsible for locking.
type linearAlloc struct {
	next   uintptr // next free byte
	mapped uintptr // one byte past end of mapped space
	end    uintptr // end of reserved space

	mapMemory bool // transition memory from Reserved to Ready if true
}

func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
	if base+size < base {
		// Chop off the last byte. The runtime isn't prepared
		// to deal with situations where the bounds could overflow.
		// Leave that memory reserved, though, so we don't map it
		// later.
		size -= 1
	}
	l.next, l.mapped = base, base
	l.end = base + size
	l.mapMemory = mapMemory
}

func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	p := alignUp(l.next, align)
	if p+size > l.end {
		return nil
	}
	l.next = p + size
	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		if l.mapMemory {
			// Transition from Reserved to Prepared to Ready.
			n := pEnd - l.mapped
			sysMap(unsafe.Pointer(l.mapped), n, sysStat)
			sysUsed(unsafe.Pointer(l.mapped), n, n)
		}
		l.mapped = pEnd
	}
	return unsafe.Pointer(p)
}
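
// Usage sketch (added for clarity, not part of the original commit; the
// reservation call and sizes are illustrative):
//
//	var l linearAlloc
//	l.init(uintptr(sysReserve(nil, 1<<20)), 1<<20, true)
//	p := l.alloc(4096, 8, &memstats.other_sys) // 4 KiB, 8-byte aligned
//
// alloc returns nil once the reserved 1 MiB region is exhausted; nothing
// is ever handed back to the reservation.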

// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
// In general, it's better to use real types which embed
// runtime/internal/sys.NotInHeap, but this serves as a generic type
// for situations where that isn't possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
type notInHeap struct{ _ sys.NotInHeap }

func (p *notInHeap) add(bytes uintptr) *notInHeap {
	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}

// computeRZlog computes the size of the redzone.
// Refer to the implementation of the compiler-rt.
func computeRZlog(userSize uintptr) uintptr {
	switch {
	case userSize <= (64 - 16):
		return 16 << 0
	case userSize <= (128 - 32):
		return 16 << 1
	case userSize <= (512 - 64):
		return 16 << 2
	case userSize <= (4096 - 128):
		return 16 << 3
	case userSize <= (1<<14)-256:
		return 16 << 4
	case userSize <= (1<<15)-512:
		return 16 << 5
	case userSize <= (1<<16)-1024:
		return 16 << 6
	default:
		return 16 << 7
	}
}
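
// Example (added for clarity, not part of the original commit): a 100-byte
// allocation falls into the "userSize <= 512-64" bucket and gets a 64-byte
// redzone; anything larger than (1<<16)-1024 bytes falls through to the
// maximum of 2048 bytes.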
*/