3 Commits

Author SHA1 Message Date
Vorapol Rinsatitnon
f9b11597fa Update to go1.23.4 2024-12-24 19:40:24 +07:00
Vorapol Rinsatitnon
289b9e3aad Fix patch 4 (1.23.4) 2024-12-24 19:34:49 +07:00
Vorapol Rinsatitnon
51ae6d6612 Update patch 4 (1.23.4) 2024-12-24 19:24:09 +07:00
33 changed files with 785 additions and 262 deletions

View File

@@ -1,2 +1,2 @@
go1.23.3 go1.23.4
time 2024-11-06T18:46:45Z time 2024-11-27T20:27:20Z

View File

@@ -1,6 +1,6 @@
From ed249cf4dbfe31b9cea185100b74c38e5c6e3bd9 Mon Sep 17 00:00:00 2001 From 3e1a3a3c96117fd4d655dd85d2e2c807e691104e Mon Sep 17 00:00:00 2001
From: Vorapol Rinsatitnon <vorapol.r@pm.me> From: Vorapol Rinsatitnon <vorapol.r@pm.me>
Date: Sat, 9 Nov 2024 18:54:55 +1100 Date: Tue, 24 Dec 2024 19:31:25 +0700
Subject: [PATCH] Add back LoadLibraryA fallback Subject: [PATCH] Add back LoadLibraryA fallback
--- ---
@@ -14,7 +14,7 @@ Subject: [PATCH] Add back LoadLibraryA fallback
7 files changed, 136 insertions(+), 7 deletions(-) 7 files changed, 136 insertions(+), 7 deletions(-)
diff --git a/src/runtime/export_windows_test.go b/src/runtime/export_windows_test.go diff --git a/src/runtime/export_windows_test.go b/src/runtime/export_windows_test.go
index 4880e62a55..8bfff0bc93 100644 index 4880e62..8bfff0b 100644
--- a/src/runtime/export_windows_test.go --- a/src/runtime/export_windows_test.go
+++ b/src/runtime/export_windows_test.go +++ b/src/runtime/export_windows_test.go
@@ -36,3 +36,7 @@ func NewContextStub() *ContextStub { @@ -36,3 +36,7 @@ func NewContextStub() *ContextStub {
@@ -26,7 +26,7 @@ index 4880e62a55..8bfff0bc93 100644
+ return useLoadLibraryEx, _LoadLibraryExW != nil, _AddDllDirectory != nil + return useLoadLibraryEx, _LoadLibraryExW != nil, _AddDllDirectory != nil
+} +}
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index 02735802d4..c76df9da22 100644 index 0273580..c76df9d 100644
--- a/src/runtime/os_windows.go --- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go +++ b/src/runtime/os_windows.go
@@ -41,6 +41,7 @@ const ( @@ -41,6 +41,7 @@ const (
@@ -139,7 +139,7 @@ index 02735802d4..c76df9da22 100644
// osRelaxMinNS indicates that sysmon shouldn't osRelax if the next // osRelaxMinNS indicates that sysmon shouldn't osRelax if the next
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index 85b1b8c902..eb808feea5 100644 index 85b1b8c..eb808fe 100644
--- a/src/runtime/syscall_windows.go --- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go
@@ -413,10 +413,23 @@ func callbackWrap(a *callbackArgs) { @@ -413,10 +413,23 @@ func callbackWrap(a *callbackArgs) {
@@ -169,7 +169,7 @@ index 85b1b8c902..eb808feea5 100644
err = 0 err = 0
} }
diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go
index 156cf3eb8e..2db5b61a9b 100644 index 156cf3e..2db5b61 100644
--- a/src/runtime/syscall_windows_test.go --- a/src/runtime/syscall_windows_test.go
+++ b/src/runtime/syscall_windows_test.go +++ b/src/runtime/syscall_windows_test.go
@@ -1166,7 +1166,10 @@ uintptr_t cfunc(void) { @@ -1166,7 +1166,10 @@ uintptr_t cfunc(void) {
@@ -210,11 +210,11 @@ index 156cf3eb8e..2db5b61a9b 100644
modwinmm = syscall.NewLazyDLL("winmm.dll") modwinmm = syscall.NewLazyDLL("winmm.dll")
modkernel32 = syscall.NewLazyDLL("kernel32.dll") modkernel32 = syscall.NewLazyDLL("kernel32.dll")
diff --git a/src/syscall/dll_windows.go b/src/syscall/dll_windows.go diff --git a/src/syscall/dll_windows.go b/src/syscall/dll_windows.go
index 81134cb0bd..b3554d349b 100644 index a7873e6..bd82b51 100644
--- a/src/syscall/dll_windows.go --- a/src/syscall/dll_windows.go
+++ b/src/syscall/dll_windows.go +++ b/src/syscall/dll_windows.go
@@ -44,7 +44,7 @@ func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a @@ -45,7 +45,7 @@ func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a
//go:noescape
func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno) func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno)
func loadlibrary(filename *uint16) (handle uintptr, err Errno) func loadlibrary(filename *uint16) (handle uintptr, err Errno)
-func loadsystemlibrary(filename *uint16) (handle uintptr, err Errno) -func loadsystemlibrary(filename *uint16) (handle uintptr, err Errno)
@@ -222,7 +222,7 @@ index 81134cb0bd..b3554d349b 100644
func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err Errno) func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err Errno)
// A DLL implements access to a single DLL. // A DLL implements access to a single DLL.
@@ -53,6 +53,26 @@ type DLL struct { @@ -54,6 +54,26 @@ type DLL struct {
Handle Handle Handle Handle
} }
@@ -249,7 +249,7 @@ index 81134cb0bd..b3554d349b 100644
// LoadDLL loads the named DLL file into memory. // LoadDLL loads the named DLL file into memory.
// //
// If name is not an absolute path and is not a known system DLL used by // If name is not an absolute path and is not a known system DLL used by
@@ -69,7 +89,11 @@ func LoadDLL(name string) (*DLL, error) { @@ -70,7 +90,11 @@ func LoadDLL(name string) (*DLL, error) {
var h uintptr var h uintptr
var e Errno var e Errno
if sysdll.IsSystemDLL[name] { if sysdll.IsSystemDLL[name] {
@@ -263,7 +263,7 @@ index 81134cb0bd..b3554d349b 100644
h, e = loadlibrary(namep) h, e = loadlibrary(namep)
} }
diff --git a/src/syscall/security_windows.go b/src/syscall/security_windows.go diff --git a/src/syscall/security_windows.go b/src/syscall/security_windows.go
index 4e988c418a..45b1908b71 100644 index 4e988c4..45b1908 100644
--- a/src/syscall/security_windows.go --- a/src/syscall/security_windows.go
+++ b/src/syscall/security_windows.go +++ b/src/syscall/security_windows.go
@@ -290,6 +290,7 @@ type Tokenprimarygroup struct { @@ -290,6 +290,7 @@ type Tokenprimarygroup struct {
@@ -275,7 +275,7 @@ index 4e988c418a..45b1908b71 100644
// An access token contains the security information for a logon session. // An access token contains the security information for a logon session.
// The system creates an access token when a user logs on, and every // The system creates an access token when a user logs on, and every
diff --git a/src/syscall/zsyscall_windows.go b/src/syscall/zsyscall_windows.go diff --git a/src/syscall/zsyscall_windows.go b/src/syscall/zsyscall_windows.go
index d8d8594a55..28369e3b91 100644 index d8d8594..28369e3 100644
--- a/src/syscall/zsyscall_windows.go --- a/src/syscall/zsyscall_windows.go
+++ b/src/syscall/zsyscall_windows.go +++ b/src/syscall/zsyscall_windows.go
@@ -128,6 +128,7 @@ var ( @@ -128,6 +128,7 @@ var (
@@ -303,5 +303,5 @@ index d8d8594a55..28369e3b91 100644
Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
return return
-- --
2.47.0 2.39.5

View File

@@ -33,7 +33,7 @@ import (
"unicode" "unicode"
) )
var globalSkip = func(t *testing.T) {} var globalSkip = func(t testing.TB) {}
// Program to run. // Program to run.
var bin []string var bin []string
@@ -59,12 +59,12 @@ func TestMain(m *testing.M) {
func testMain(m *testing.M) int { func testMain(m *testing.M) int {
if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
globalSkip = func(t *testing.T) { t.Skip("short mode and $GO_BUILDER_NAME not set") } globalSkip = func(t testing.TB) { t.Skip("short mode and $GO_BUILDER_NAME not set") }
return m.Run() return m.Run()
} }
if runtime.GOOS == "linux" { if runtime.GOOS == "linux" {
if _, err := os.Stat("/etc/alpine-release"); err == nil { if _, err := os.Stat("/etc/alpine-release"); err == nil {
globalSkip = func(t *testing.T) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") } globalSkip = func(t testing.TB) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") }
return m.Run() return m.Run()
} }
} }
@@ -1291,8 +1291,8 @@ func TestPreemption(t *testing.T) {
} }
} }
// Issue 59294. Test calling Go function from C after using some // Issue 59294 and 68285. Test calling Go function from C after with
// stack space. // various stack space.
func TestDeepStack(t *testing.T) { func TestDeepStack(t *testing.T) {
globalSkip(t) globalSkip(t)
testenv.MustHaveGoBuild(t) testenv.MustHaveGoBuild(t)
@@ -1350,6 +1350,53 @@ func TestDeepStack(t *testing.T) {
} }
} }
func BenchmarkCgoCallbackMainThread(b *testing.B) {
// Benchmark for calling into Go from the C main thread.
// See issue #68587.
//
// It uses a subprocess, which is a C binary that calls
// Go on the main thread b.N times. There is some overhead
// for launching the subprocess. It is probably fine when
// b.N is large.
globalSkip(b)
testenv.MustHaveGoBuild(b)
testenv.MustHaveCGO(b)
testenv.MustHaveBuildMode(b, "c-archive")
if !testWork {
defer func() {
os.Remove("testp10" + exeSuffix)
os.Remove("libgo10.a")
os.Remove("libgo10.h")
}()
}
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo10.a", "./libgo10")
out, err := cmd.CombinedOutput()
b.Logf("%v\n%s", cmd.Args, out)
if err != nil {
b.Fatal(err)
}
ccArgs := append(cc, "-o", "testp10"+exeSuffix, "main10.c", "libgo10.a")
out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
b.Logf("%v\n%s", ccArgs, out)
if err != nil {
b.Fatal(err)
}
argv := cmdToRun("./testp10")
argv = append(argv, fmt.Sprint(b.N))
cmd = exec.Command(argv[0], argv[1:]...)
b.ResetTimer()
err = cmd.Run()
if err != nil {
b.Fatal(err)
}
}
func TestSharedObject(t *testing.T) { func TestSharedObject(t *testing.T) {
// Test that we can put a Go c-archive into a C shared object. // Test that we can put a Go c-archive into a C shared object.
globalSkip(t) globalSkip(t)

View File

@@ -0,0 +1,12 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "C"
//export GoF
func GoF() {}
func main() {}

View File

@@ -6,9 +6,29 @@ package main
import "runtime" import "runtime"
// extern void callGoWithVariousStack(int);
import "C" import "C"
func main() {} func main() {}
//export GoF //export GoF
func GoF() { runtime.GC() } func GoF(p int32) {
runtime.GC()
if p != 0 {
panic("panic")
}
}
//export callGoWithVariousStackAndGoFrame
func callGoWithVariousStackAndGoFrame(p int32) {
if p != 0 {
defer func() {
e := recover()
if e == nil {
panic("did not panic")
}
runtime.GC()
}()
}
C.callGoWithVariousStack(C.int(p));
}

View File

@@ -0,0 +1,22 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <stdio.h>
#include <stdlib.h>
#include "libgo10.h"
int main(int argc, char **argv) {
int n, i;
if (argc != 2) {
perror("wrong arg");
return 2;
}
n = atoi(argv[1]);
for (i = 0; i < n; i++)
GoF();
return 0;
}

View File

@@ -6,19 +6,27 @@
void use(int *x) { (*x)++; } void use(int *x) { (*x)++; }
void callGoFWithDeepStack() { void callGoFWithDeepStack(int p) {
int x[10000]; int x[10000];
use(&x[0]); use(&x[0]);
use(&x[9999]); use(&x[9999]);
GoF(); GoF(p);
use(&x[0]); use(&x[0]);
use(&x[9999]); use(&x[9999]);
} }
int main() { void callGoWithVariousStack(int p) {
GoF(); // call GoF without using much stack GoF(0); // call GoF without using much stack
callGoFWithDeepStack(); // call GoF with a deep stack callGoFWithDeepStack(p); // call GoF with a deep stack
GoF(0); // again on a shallow stack
}
int main() {
callGoWithVariousStack(0);
callGoWithVariousStackAndGoFrame(0); // normal execution
callGoWithVariousStackAndGoFrame(1); // panic and recover
} }

View File

@@ -318,9 +318,10 @@ func containsClosure(f, c *ir.Func) bool {
return false return false
} }
// Closures within function Foo are named like "Foo.funcN..." or "Foo-rangeN". for p := c.ClosureParent; p != nil; p = p.ClosureParent {
// TODO(mdempsky): Better way to recognize this. if p == f {
fn := f.Sym().Name return true
cn := c.Sym().Name }
return len(cn) > len(fn) && cn[:len(fn)] == fn && (cn[len(fn)] == '.' || cn[len(fn)] == '-') }
return false
} }

View File

@@ -582,6 +582,23 @@ func TestIssue25596(t *testing.T) {
compileAndImportPkg(t, "issue25596") compileAndImportPkg(t, "issue25596")
} }
func TestIssue70394(t *testing.T) {
testenv.MustHaveGoBuild(t)
// This package only handles gc export data.
if runtime.Compiler != "gc" {
t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
}
pkg := compileAndImportPkg(t, "alias")
obj := lookupObj(t, pkg.Scope(), "A")
typ := obj.Type()
if _, ok := typ.(*types2.Alias); !ok {
t.Fatalf("type of %s is %s, wanted an alias", obj, typ)
}
}
func importPkg(t *testing.T, path, srcDir string) *types2.Package { func importPkg(t *testing.T, path, srcDir string) *types2.Package {
pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil) pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil)
if err != nil { if err != nil {

View File

@@ -0,0 +1,7 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testdata
type A = int32

View File

@@ -31,9 +31,7 @@ func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input
ctxt: ctxt, ctxt: ctxt,
imports: imports, imports: imports,
// Currently, the compiler panics when using Alias types. enableAlias: true,
// TODO(gri) set to true once this is fixed (issue #66873)
enableAlias: false,
posBases: make([]*syntax.PosBase, input.NumElems(pkgbits.RelocPosBase)), posBases: make([]*syntax.PosBase, input.NumElems(pkgbits.RelocPosBase)),
pkgs: make([]*types2.Package, input.NumElems(pkgbits.RelocPkg)), pkgs: make([]*types2.Package, input.NumElems(pkgbits.RelocPkg)),

View File

@@ -51,6 +51,8 @@ import (
// the generated ODCLFUNC, but there is no // the generated ODCLFUNC, but there is no
// pointer from the Func back to the OMETHVALUE. // pointer from the Func back to the OMETHVALUE.
type Func struct { type Func struct {
// if you add or remove a field, don't forget to update sizeof_test.go
miniNode miniNode
Body Nodes Body Nodes
@@ -76,6 +78,9 @@ type Func struct {
// Populated during walk. // Populated during walk.
Closures []*Func Closures []*Func
// Parent of a closure
ClosureParent *Func
// Parents records the parent scope of each scope within a // Parents records the parent scope of each scope within a
// function. The root scope (0) has no parent, so the i'th // function. The root scope (0) has no parent, so the i'th
// scope's parent is stored at Parents[i-1]. // scope's parent is stored at Parents[i-1].
@@ -512,6 +517,7 @@ func NewClosureFunc(fpos, cpos src.XPos, why Op, typ *types.Type, outerfn *Func,
fn.Nname.Defn = fn fn.Nname.Defn = fn
pkg.Funcs = append(pkg.Funcs, fn) pkg.Funcs = append(pkg.Funcs, fn)
fn.ClosureParent = outerfn
return fn return fn
} }

View File

@@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms _32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms _64bit uintptr // size on 64bit platforms
}{ }{
{Func{}, 176, 296}, {Func{}, 180, 304},
{Name{}, 96, 168}, {Name{}, 96, 168},
} }

View File

@@ -2099,3 +2099,27 @@ func TestTwoLevelReturnCheck(t *testing.T) {
t.Errorf("Expected y=3, got y=%d\n", y) t.Errorf("Expected y=3, got y=%d\n", y)
} }
} }
func Bug70035(s1, s2, s3 []string) string {
var c1 string
for v1 := range slices.Values(s1) {
var c2 string
for v2 := range slices.Values(s2) {
var c3 string
for v3 := range slices.Values(s3) {
c3 = c3 + v3
}
c2 = c2 + v2 + c3
}
c1 = c1 + v1 + c2
}
return c1
}
func Test70035(t *testing.T) {
got := Bug70035([]string{"1", "2", "3"}, []string{"a", "b", "c"}, []string{"A", "B", "C"})
want := "1aABCbABCcABC2aABCbABCcABC3aABCbABCcABC"
if got != want {
t.Errorf("got %v, want %v", got, want)
}
}

View File

@@ -257,6 +257,10 @@ func (gs *gState[R]) stop(ts trace.Time, stack trace.Stack, ctx *traceContext) {
if gs.lastStopStack != trace.NoStack { if gs.lastStopStack != trace.NoStack {
stk = ctx.Stack(viewerFrames(gs.lastStopStack)) stk = ctx.Stack(viewerFrames(gs.lastStopStack))
} }
var endStk int
if stack != trace.NoStack {
endStk = ctx.Stack(viewerFrames(stack))
}
// Check invariants. // Check invariants.
if gs.startRunningTime == 0 { if gs.startRunningTime == 0 {
panic("silently broken trace or generator invariant (startRunningTime != 0) not held") panic("silently broken trace or generator invariant (startRunningTime != 0) not held")
@@ -270,6 +274,7 @@ func (gs *gState[R]) stop(ts trace.Time, stack trace.Stack, ctx *traceContext) {
Dur: ts.Sub(gs.startRunningTime), Dur: ts.Sub(gs.startRunningTime),
Resource: uint64(gs.executing), Resource: uint64(gs.executing),
Stack: stk, Stack: stk,
EndStack: endStk,
}) })
// Flush completed ranges. // Flush completed ranges.

View File

@@ -72,6 +72,6 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error,
if err == syscall.EAGAIN { if err == syscall.EAGAIN {
err = nil err = nil
} }
handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL && err != syscall.EOPNOTSUPP && err != syscall.ENOTSUP)
return return
} }

View File

@@ -53,6 +53,9 @@ func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) {
if err != nil { if err != nil {
return 0, err, false return 0, err, false
} }
if fi.Mode()&(fs.ModeSymlink|fs.ModeDevice|fs.ModeCharDevice|fs.ModeIrregular) != 0 {
return 0, nil, false
}
remain = fi.Size() remain = fi.Size()
} }

View File

@@ -0,0 +1,86 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix
package net
import (
"internal/testpty"
"io"
"os"
"sync"
"syscall"
"testing"
)
// Issue 70763: test that we don't fail on sendfile from a tty.
func TestCopyFromTTY(t *testing.T) {
pty, ttyName, err := testpty.Open()
if err != nil {
t.Skipf("skipping test because pty open failed: %v", err)
}
defer pty.Close()
// Use syscall.Open so that the tty is blocking.
ttyFD, err := syscall.Open(ttyName, syscall.O_RDWR, 0)
if err != nil {
t.Skipf("skipping test because tty open failed: %v", err)
}
defer syscall.Close(ttyFD)
tty := os.NewFile(uintptr(ttyFD), "tty")
defer tty.Close()
ln := newLocalListener(t, "tcp")
defer ln.Close()
ch := make(chan bool)
const data = "data\n"
var wg sync.WaitGroup
defer wg.Wait()
wg.Add(1)
go func() {
defer wg.Done()
conn, err := ln.Accept()
if err != nil {
t.Error(err)
return
}
defer conn.Close()
buf := make([]byte, len(data))
if _, err := io.ReadFull(conn, buf); err != nil {
t.Error(err)
}
ch <- true
}()
conn, err := Dial("tcp", ln.Addr().String())
if err != nil {
t.Fatal(err)
}
defer conn.Close()
wg.Add(1)
go func() {
defer wg.Done()
if _, err := pty.Write([]byte(data)); err != nil {
t.Error(err)
}
<-ch
if err := pty.Close(); err != nil {
t.Error(err)
}
}()
lr := io.LimitReader(tty, int64(len(data)))
if _, err := io.Copy(conn, lr); err != nil {
t.Error(err)
}
}

View File

@@ -31,10 +31,11 @@ x_cgo_getstackbound(uintptr bounds[2])
pthread_attr_get_np(pthread_self(), &attr); pthread_attr_get_np(pthread_self(), &attr);
pthread_attr_getstack(&attr, &addr, &size); // low address pthread_attr_getstack(&attr, &addr, &size); // low address
#else #else
// We don't know how to get the current stacks, so assume they are the // We don't know how to get the current stacks, leave it as
// same as the default stack bounds. // 0 and the caller will use an estimate based on the current
pthread_attr_getstacksize(&attr, &size); // SP.
addr = __builtin_frame_address(0) + 4096 - size; addr = 0;
size = 0;
#endif #endif
pthread_attr_destroy(&attr); pthread_attr_destroy(&attr);

View File

@@ -231,34 +231,6 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) { func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
g0 := mp.g0 g0 := mp.g0
inBound := sp > g0.stack.lo && sp <= g0.stack.hi
if mp.ncgo > 0 && !inBound {
// ncgo > 0 indicates that this M was in Go further up the stack
// (it called C and is now receiving a callback).
//
// !inBound indicates that we were called with SP outside the
// expected system stack bounds (C changed the stack out from
// under us between the cgocall and cgocallback?).
//
// It is not safe for the C call to change the stack out from
// under us, so throw.
// Note that this case isn't possible for signal == true, as
// that is always passing a new M from needm.
// Stack is bogus, but reset the bounds anyway so we can print.
hi := g0.stack.hi
lo := g0.stack.lo
g0.stack.hi = sp + 1024
g0.stack.lo = sp - 32*1024
g0.stackguard0 = g0.stack.lo + stackGuard
g0.stackguard1 = g0.stackguard0
print("M ", mp.id, " procid ", mp.procid, " runtime: cgocallback with sp=", hex(sp), " out of bounds [", hex(lo), ", ", hex(hi), "]")
print("\n")
exit(2)
}
if !mp.isextra { if !mp.isextra {
// We allocated the stack for standard Ms. Don't replace the // We allocated the stack for standard Ms. Don't replace the
// stack bounds with estimated ones when we already initialized // stack bounds with estimated ones when we already initialized
@@ -266,26 +238,37 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
return return
} }
// This M does not have Go further up the stack. However, it may have inBound := sp > g0.stack.lo && sp <= g0.stack.hi
// previously called into Go, initializing the stack bounds. Between if inBound && mp.g0StackAccurate {
// that call returning and now the stack may have changed (perhaps the // This M has called into Go before and has the stack bounds
// C thread is running a coroutine library). We need to update the // initialized. We have the accurate stack bounds, and the SP
// stack bounds for this case. // is in bounds. We expect it continues to run within the same
// bounds.
return
}
// We don't have accurate stack bounds (either it never called
// into Go before, or we couldn't get the accurate bounds), or the
// current SP is not within the previous bounds (the stack may have
// changed between calls). We need to update the stack bounds.
// //
// N.B. we need to update the stack bounds even if SP appears to // N.B. we need to update the stack bounds even if SP appears to
// already be in bounds. Our "bounds" may actually be estimated dummy // already be in bounds, if our bounds are estimated dummy bounds
// bounds (below). The actual stack bounds could have shifted but still // (below). We may be in a different region within the same actual
// have partial overlap with our dummy bounds. If we failed to update // stack bounds, but our estimates were not accurate. Or the actual
// in that case, we could find ourselves seemingly called near the // stack bounds could have shifted but still have partial overlap with
// bottom of the stack bounds, where we quickly run out of space. // our dummy bounds. If we failed to update in that case, we could find
// ourselves seemingly called near the bottom of the stack bounds, where
// we quickly run out of space.
// Set the stack bounds to match the current stack. If we don't // Set the stack bounds to match the current stack. If we don't
// actually know how big the stack is, like we don't know how big any // actually know how big the stack is, like we don't know how big any
// scheduling stack is, but we assume there's at least 32 kB. If we // scheduling stack is, but we assume there's at least 32 kB. If we
// can get a more accurate stack bound from pthread, use that, provided // can get a more accurate stack bound from pthread, use that, provided
// it actually contains SP.. // it actually contains SP.
g0.stack.hi = sp + 1024 g0.stack.hi = sp + 1024
g0.stack.lo = sp - 32*1024 g0.stack.lo = sp - 32*1024
mp.g0StackAccurate = false
if !signal && _cgo_getstackbound != nil { if !signal && _cgo_getstackbound != nil {
// Don't adjust if called from the signal handler. // Don't adjust if called from the signal handler.
// We are on the signal stack, not the pthread stack. // We are on the signal stack, not the pthread stack.
@@ -296,12 +279,16 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds)) asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
// getstackbound is an unsupported no-op on Windows. // getstackbound is an unsupported no-op on Windows.
// //
// On Unix systems, if the API to get accurate stack bounds is
// not available, it returns zeros.
//
// Don't use these bounds if they don't contain SP. Perhaps we // Don't use these bounds if they don't contain SP. Perhaps we
// were called by something not using the standard thread // were called by something not using the standard thread
// stack. // stack.
if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] { if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] {
g0.stack.lo = bounds[0] g0.stack.lo = bounds[0]
g0.stack.hi = bounds[1] g0.stack.hi = bounds[1]
mp.g0StackAccurate = true
} }
} }
g0.stackguard0 = g0.stack.lo + stackGuard g0.stackguard0 = g0.stack.lo + stackGuard
@@ -319,6 +306,8 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
} }
sp := gp.m.g0.sched.sp // system sp saved by cgocallback. sp := gp.m.g0.sched.sp // system sp saved by cgocallback.
oldStack := gp.m.g0.stack
oldAccurate := gp.m.g0StackAccurate
callbackUpdateSystemStack(gp.m, sp, false) callbackUpdateSystemStack(gp.m, sp, false)
// The call from C is on gp.m's g0 stack, so we must ensure // The call from C is on gp.m's g0 stack, so we must ensure
@@ -380,6 +369,12 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
reentersyscall(savedpc, uintptr(savedsp), uintptr(savedbp)) reentersyscall(savedpc, uintptr(savedsp), uintptr(savedbp))
gp.m.winsyscall = winsyscall gp.m.winsyscall = winsyscall
// Restore the old g0 stack bounds
gp.m.g0.stack = oldStack
gp.m.g0.stackguard0 = oldStack.lo + stackGuard
gp.m.g0.stackguard1 = gp.m.g0.stackguard0
gp.m.g0StackAccurate = oldAccurate
} }
func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) { func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {

View File

@@ -1886,3 +1886,30 @@ func (m *TraceMap) PutString(s string) (uint64, bool) {
func (m *TraceMap) Reset() { func (m *TraceMap) Reset() {
m.traceMap.reset() m.traceMap.reset()
} }
func SetSpinInGCMarkDone(spin bool) {
gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
}
func GCMarkDoneRestarted() bool {
// Only read this outside of the GC. If we're running during a GC, just report false.
mp := acquirem()
if gcphase != _GCoff {
releasem(mp)
return false
}
restarted := gcDebugMarkDone.restartedDueTo27993
releasem(mp)
return restarted
}
func GCMarkDoneResetRestartFlag() {
mp := acquirem()
for gcphase != _GCoff {
releasem(mp)
Gosched()
mp = acquirem()
}
gcDebugMarkDone.restartedDueTo27993 = false
releasem(mp)
}

View File

@@ -6,6 +6,8 @@ package runtime_test
import ( import (
"fmt" "fmt"
"internal/testenv"
"internal/weak"
"math/bits" "math/bits"
"math/rand" "math/rand"
"os" "os"
@@ -787,3 +789,78 @@ func TestMemoryLimitNoGCPercent(t *testing.T) {
func TestMyGenericFunc(t *testing.T) { func TestMyGenericFunc(t *testing.T) {
runtime.MyGenericFunc[int]() runtime.MyGenericFunc[int]()
} }
func TestWeakToStrongMarkTermination(t *testing.T) {
testenv.MustHaveParallelism(t)
type T struct {
a *int
b int
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
defer debug.SetGCPercent(debug.SetGCPercent(-1))
w := make([]weak.Pointer[T], 2048)
// Make sure there's no out-standing GC from a previous test.
runtime.GC()
// Create many objects with a weak pointers to them.
for i := range w {
x := new(T)
x.a = new(int)
w[i] = weak.Make(x)
}
// Reset the restart flag.
runtime.GCMarkDoneResetRestartFlag()
// Prevent mark termination from completing.
runtime.SetSpinInGCMarkDone(true)
// Start a GC, and wait a little bit to get something spinning in mark termination.
// Simultaneously, fire off another goroutine to disable spinning. If everything's
// working correctly, then weak.Strong will block, so we need to make sure something
// prevents the GC from continuing to spin.
done := make(chan struct{})
go func() {
runtime.GC()
done <- struct{}{}
}()
go func() {
time.Sleep(100 * time.Millisecond)
// Let mark termination continue.
runtime.SetSpinInGCMarkDone(false)
}()
time.Sleep(10 * time.Millisecond)
// Perform many weak->strong conversions in the critical window.
var wg sync.WaitGroup
for _, wp := range w {
wg.Add(1)
go func() {
defer wg.Done()
wp.Strong()
}()
}
// Make sure the GC completes.
<-done
// Make sure all the weak->strong conversions finish.
wg.Wait()
// The bug is triggered if there's still mark work after gcMarkDone stops the world.
//
// This can manifest in one of two ways today:
// - An exceedingly rare crash in mark termination.
// - gcMarkDone restarts, as if issue #27993 is at play.
//
// Check for the latter. This is a fairly controlled environment, so #27993 is very
// unlikely to happen (it's already rare to begin with) but we'll always _appear_ to
// trigger the same bug if weak->strong conversions aren't properly coordinated with
// mark termination.
if runtime.GCMarkDoneRestarted() {
t.Errorf("gcMarkDone restarted")
}
}

View File

@@ -17,6 +17,7 @@ const (
lockRankDefer lockRankDefer
lockRankSweepWaiters lockRankSweepWaiters
lockRankAssistQueue lockRankAssistQueue
lockRankStrongFromWeakQueue
lockRankSweep lockRankSweep
lockRankTestR lockRankTestR
lockRankTestW lockRankTestW
@@ -90,6 +91,7 @@ var lockNames = []string{
lockRankDefer: "defer", lockRankDefer: "defer",
lockRankSweepWaiters: "sweepWaiters", lockRankSweepWaiters: "sweepWaiters",
lockRankAssistQueue: "assistQueue", lockRankAssistQueue: "assistQueue",
lockRankStrongFromWeakQueue: "strongFromWeakQueue",
lockRankSweep: "sweep", lockRankSweep: "sweep",
lockRankTestR: "testR", lockRankTestR: "testR",
lockRankTestW: "testW", lockRankTestW: "testW",
@@ -169,6 +171,7 @@ var lockPartialOrder [][]lockRank = [][]lockRank{
lockRankDefer: {}, lockRankDefer: {},
lockRankSweepWaiters: {}, lockRankSweepWaiters: {},
lockRankAssistQueue: {}, lockRankAssistQueue: {},
lockRankStrongFromWeakQueue: {},
lockRankSweep: {}, lockRankSweep: {},
lockRankTestR: {}, lockRankTestR: {},
lockRankTestW: {}, lockRankTestW: {},
@@ -180,11 +183,11 @@ var lockPartialOrder [][]lockRank = [][]lockRank{
lockRankPollDesc: {}, lockRankPollDesc: {},
lockRankWakeableSleep: {}, lockRankWakeableSleep: {},
lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan}, lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan},
lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan},
lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan},
lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR},
lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched},
lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched},
lockRankNotifyList: {}, lockRankNotifyList: {},
lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList},
lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers},
@@ -196,29 +199,29 @@ var lockPartialOrder [][]lockRank = [][]lockRank{
lockRankUserArenaState: {}, lockRankUserArenaState: {},
lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},
lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial},
lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},
lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture},
lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial},
lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
lockRankPanic: {}, lockRankPanic: {},
lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, lockRankDeadlock: {lockRankPanic, lockRankDeadlock},
lockRankRaceFini: {lockRankPanic}, lockRankRaceFini: {lockRankPanic},
lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR}, lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR},
lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR}, lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR},
lockRankTestRInternal: {lockRankTestR, lockRankTestW}, lockRankTestRInternal: {lockRankTestR, lockRankTestW},
} }

View File

@@ -190,6 +190,7 @@ func gcinit() {
work.markDoneSema = 1 work.markDoneSema = 1
lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters) lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
lockInit(&work.assistQueue.lock, lockRankAssistQueue) lockInit(&work.assistQueue.lock, lockRankAssistQueue)
lockInit(&work.strongFromWeak.lock, lockRankStrongFromWeakQueue)
lockInit(&work.wbufSpans.lock, lockRankWbufSpans) lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
} }
@@ -418,6 +419,26 @@ type workType struct {
list gList list gList
} }
// strongFromWeak controls how the GC interacts with weak->strong
// pointer conversions.
strongFromWeak struct {
// block is a flag set during mark termination that prevents
// new weak->strong conversions from executing by blocking the
// goroutine and enqueuing it onto q.
//
// Mutated only by one goroutine at a time in gcMarkDone,
// with globally-synchronizing events like forEachP and
// stopTheWorld.
block bool
// q is a queue of goroutines that attempted to perform a
// weak->strong conversion during mark termination.
//
// Protected by lock.
lock mutex
q gQueue
}
// cycles is the number of completed GC cycles, where a GC // cycles is the number of completed GC cycles, where a GC
// cycle is sweep termination, mark, mark termination, and // cycle is sweep termination, mark, mark termination, and
// sweep. This differs from memstats.numgc, which is // sweep. This differs from memstats.numgc, which is
@@ -800,6 +821,19 @@ func gcStart(trigger gcTrigger) {
// This is protected by markDoneSema. // This is protected by markDoneSema.
var gcMarkDoneFlushed uint32 var gcMarkDoneFlushed uint32
// gcDebugMarkDone contains fields used to debug/test mark termination.
var gcDebugMarkDone struct {
// spinAfterRaggedBarrier forces gcMarkDone to spin after it executes
// the ragged barrier.
spinAfterRaggedBarrier atomic.Bool
// restartedDueTo27993 indicates that we restarted mark termination
// due to the bug described in issue #27993.
//
// Protected by worldsema.
restartedDueTo27993 bool
}
// gcMarkDone transitions the GC from mark to mark termination if all // gcMarkDone transitions the GC from mark to mark termination if all
// reachable objects have been marked (that is, there are no grey // reachable objects have been marked (that is, there are no grey
// objects and can be no more in the future). Otherwise, it flushes // objects and can be no more in the future). Otherwise, it flushes
@@ -842,6 +876,10 @@ top:
// stop the world later, so acquire worldsema now. // stop the world later, so acquire worldsema now.
semacquire(&worldsema) semacquire(&worldsema)
// Prevent weak->strong conversions from generating additional
// GC work. forEachP will guarantee that it is observed globally.
work.strongFromWeak.block = true
// Flush all local buffers and collect flushedWork flags. // Flush all local buffers and collect flushedWork flags.
gcMarkDoneFlushed = 0 gcMarkDoneFlushed = 0
forEachP(waitReasonGCMarkTermination, func(pp *p) { forEachP(waitReasonGCMarkTermination, func(pp *p) {
@@ -872,6 +910,10 @@ top:
goto top goto top
} }
// For debugging/testing.
for gcDebugMarkDone.spinAfterRaggedBarrier.Load() {
}
// There was no global work, no local work, and no Ps // There was no global work, no local work, and no Ps
// communicated work since we took markDoneSema. Therefore // communicated work since we took markDoneSema. Therefore
// there are no grey objects and no more objects can be // there are no grey objects and no more objects can be
@@ -910,6 +952,8 @@ top:
} }
}) })
if restart { if restart {
gcDebugMarkDone.restartedDueTo27993 = true
getg().m.preemptoff = "" getg().m.preemptoff = ""
systemstack(func() { systemstack(func() {
// Accumulate the time we were stopped before we had to start again. // Accumulate the time we were stopped before we had to start again.
@@ -936,6 +980,11 @@ top:
// start the world again. // start the world again.
gcWakeAllAssists() gcWakeAllAssists()
// Wake all blocked weak->strong conversions. These will run
// when we start the world again.
work.strongFromWeak.block = false
gcWakeAllStrongFromWeak()
// Likewise, release the transition lock. Blocked // Likewise, release the transition lock. Blocked
// workers and assists will run when we start the // workers and assists will run when we start the
// world again. // world again.

View File

@@ -2049,8 +2049,19 @@ func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer
func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer { func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer {
handle := (*atomic.Uintptr)(u) handle := (*atomic.Uintptr)(u)
// Prevent preemption. We want to make sure that another GC cycle can't start. // Prevent preemption. We want to make sure that another GC cycle can't start
// and that work.strongFromWeak.block can't change out from under us.
mp := acquirem() mp := acquirem()
// Yield to the GC if necessary.
if work.strongFromWeak.block {
releasem(mp)
// Try to park and wait for mark termination.
// N.B. gcParkStrongFromWeak calls acquirem before returning.
mp = gcParkStrongFromWeak()
}
p := handle.Load() p := handle.Load()
if p == 0 { if p == 0 {
releasem(mp) releasem(mp)
@@ -2092,6 +2103,41 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer {
return ptr return ptr
} }
// gcParkStrongFromWeak puts the current goroutine on the weak->strong queue and parks.
func gcParkStrongFromWeak() *m {
// Prevent preemption as we check strongFromWeak, so it can't change out from under us.
mp := acquirem()
for work.strongFromWeak.block {
lock(&work.strongFromWeak.lock)
releasem(mp) // N.B. Holding the lock prevents preemption.
// Queue ourselves up.
work.strongFromWeak.q.pushBack(getg())
// Park.
goparkunlock(&work.strongFromWeak.lock, waitReasonGCWeakToStrongWait, traceBlockGCWeakToStrongWait, 2)
// Re-acquire the current M since we're going to check the condition again.
mp = acquirem()
// Re-check condition. We may have awoken in the next GC's mark termination phase.
}
return mp
}
// gcWakeAllStrongFromWeak wakes all currently blocked weak->strong
// conversions. This is used at the end of a GC cycle.
//
// work.strongFromWeak.block must be false to prevent woken goroutines
// from immediately going back to sleep.
func gcWakeAllStrongFromWeak() {
lock(&work.strongFromWeak.lock)
list := work.strongFromWeak.q.popList()
injectglist(&list)
unlock(&work.strongFromWeak.lock)
}
// Retrieves or creates a weak pointer handle for the object p. // Retrieves or creates a weak pointer handle for the object p.
func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
// First try to retrieve without allocating. // First try to retrieve without allocating.
@@ -2126,8 +2172,14 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
// Keep p alive for the duration of the function to ensure // Keep p alive for the duration of the function to ensure
// that it cannot die while we're trying to do this. // that it cannot die while we're trying to do this.
//
// Same for handle, which is only stored in the special.
// There's a window where it might die if we don't keep it
// alive explicitly. Returning it here is probably good enough,
// but let's be defensive and explicit. See #70455.
KeepAlive(p) KeepAlive(p)
return s.handle KeepAlive(handle)
return handle
} }
// There was an existing handle. Free the special // There was an existing handle. Free the special
@@ -2147,7 +2199,10 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
// Keep p alive for the duration of the function to ensure // Keep p alive for the duration of the function to ensure
// that it cannot die while we're trying to do this. // that it cannot die while we're trying to do this.
//
// Same for handle, just to be defensive.
KeepAlive(p) KeepAlive(p)
KeepAlive(handle)
return handle return handle
} }

View File

@@ -50,6 +50,7 @@ NONE < defer;
NONE < NONE <
sweepWaiters, sweepWaiters,
assistQueue, assistQueue,
strongFromWeakQueue,
sweep; sweep;
# Test only # Test only
@@ -66,6 +67,7 @@ assistQueue,
hchan, hchan,
pollDesc, # pollDesc can interact with timers, which can lock sched. pollDesc, # pollDesc can interact with timers, which can lock sched.
scavenge, scavenge,
strongFromWeakQueue,
sweep, sweep,
sweepWaiters, sweepWaiters,
testR, testR,

View File

@@ -2539,6 +2539,7 @@ func dropm() {
g0.stack.lo = 0 g0.stack.lo = 0
g0.stackguard0 = 0 g0.stackguard0 = 0
g0.stackguard1 = 0 g0.stackguard1 = 0
mp.g0StackAccurate = false
putExtraM(mp) putExtraM(mp)

View File

@@ -584,6 +584,7 @@ type m struct {
isExtraInSig bool // m is an extra m in a signal handler isExtraInSig bool // m is an extra m in a signal handler
freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait) freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
needextram bool needextram bool
g0StackAccurate bool // whether the g0 stack has accurate bounds
traceback uint8 traceback uint8
ncgocall uint64 // number of cgo calls in total ncgocall uint64 // number of cgo calls in total
ncgo int32 // number of cgo calls currently in progress ncgo int32 // number of cgo calls currently in progress
@@ -1095,6 +1096,7 @@ const (
waitReasonTraceProcStatus // "trace proc status" waitReasonTraceProcStatus // "trace proc status"
waitReasonPageTraceFlush // "page trace flush" waitReasonPageTraceFlush // "page trace flush"
waitReasonCoroutine // "coroutine" waitReasonCoroutine // "coroutine"
waitReasonGCWeakToStrongWait // "GC weak to strong wait"
) )
var waitReasonStrings = [...]string{ var waitReasonStrings = [...]string{
@@ -1135,6 +1137,7 @@ var waitReasonStrings = [...]string{
waitReasonTraceProcStatus: "trace proc status", waitReasonTraceProcStatus: "trace proc status",
waitReasonPageTraceFlush: "page trace flush", waitReasonPageTraceFlush: "page trace flush",
waitReasonCoroutine: "coroutine", waitReasonCoroutine: "coroutine",
waitReasonGCWeakToStrongWait: "GC weak to strong wait",
} }
func (w waitReason) String() string { func (w waitReason) String() string {

View File

@@ -69,7 +69,7 @@ const (
// to each stack below the usual guard area for OS-specific // to each stack below the usual guard area for OS-specific
// purposes like signal handling. Used on Windows, Plan 9, // purposes like signal handling. Used on Windows, Plan 9,
// and iOS because they do not use a separate stack. // and iOS because they do not use a separate stack.
stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024 stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
// The minimum size of stack used by Go code // The minimum size of stack used by Go code
stackMin = 2048 stackMin = 2048
@@ -1330,7 +1330,7 @@ func morestackc() {
} }
// startingStackSize is the amount of stack that new goroutines start with. // startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2, and between _FixedStack and maxstacksize, inclusive. // It is a power of 2, and between fixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of // startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC. // stacks scanned during the GC.
var startingStackSize uint32 = fixedStack var startingStackSize uint32 = fixedStack

View File

@@ -99,6 +99,7 @@ const (
traceBlockDebugCall traceBlockDebugCall
traceBlockUntilGCEnds traceBlockUntilGCEnds
traceBlockSleep traceBlockSleep
traceBlockGCWeakToStrongWait
) )
var traceBlockReasonStrings = [...]string{ var traceBlockReasonStrings = [...]string{
@@ -117,6 +118,7 @@ var traceBlockReasonStrings = [...]string{
traceBlockDebugCall: "wait for debug call", traceBlockDebugCall: "wait for debug call",
traceBlockUntilGCEnds: "wait until GC ends", traceBlockUntilGCEnds: "wait until GC ends",
traceBlockSleep: "sleep", traceBlockSleep: "sleep",
traceBlockGCWeakToStrongWait: "GC weak to strong wait",
} }
// traceGoStopReason is an enumeration of reasons a goroutine might yield. // traceGoStopReason is an enumeration of reasons a goroutine might yield.

View File

@@ -42,6 +42,7 @@ func Syscall15(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a
// Deprecated: Use [SyscallN] instead. // Deprecated: Use [SyscallN] instead.
func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2 uintptr, err Errno) func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2 uintptr, err Errno)
//go:noescape
func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno) func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno)
func loadlibrary(filename *uint16) (handle uintptr, err Errno) func loadlibrary(filename *uint16) (handle uintptr, err Errno)
func loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle uintptr, err Errno) func loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle uintptr, err Errno)

View File

@@ -213,6 +213,51 @@ func TestGetStartupInfo(t *testing.T) {
} }
} }
func TestSyscallAllocations(t *testing.T) {
testenv.SkipIfOptimizationOff(t)
// Test that syscall.SyscallN arguments do not escape.
// The function used (in this case GetVersion) doesn't matter
// as long as it is always available and doesn't panic.
h, err := syscall.LoadLibrary("kernel32.dll")
if err != nil {
t.Fatal(err)
}
defer syscall.FreeLibrary(h)
proc, err := syscall.GetProcAddress(h, "GetVersion")
if err != nil {
t.Fatal(err)
}
testAllocs := func(t *testing.T, name string, fn func() error) {
t.Run(name, func(t *testing.T) {
n := int(testing.AllocsPerRun(10, func() {
if err := fn(); err != nil {
t.Fatalf("%s: %v", name, err)
}
}))
if n > 0 {
t.Errorf("allocs = %d, want 0", n)
}
})
}
testAllocs(t, "SyscallN", func() error {
r0, _, e1 := syscall.SyscallN(proc, 0, 0, 0)
if r0 == 0 {
return syscall.Errno(e1)
}
return nil
})
testAllocs(t, "Syscall", func() error {
r0, _, e1 := syscall.Syscall(proc, 3, 0, 0, 0)
if r0 == 0 {
return syscall.Errno(e1)
}
return nil
})
}
func FuzzUTF16FromString(f *testing.F) { func FuzzUTF16FromString(f *testing.F) {
f.Add("hi") // ASCII f.Add("hi") // ASCII
f.Add("â") // latin1 f.Add("â") // latin1

View File

@@ -14,6 +14,7 @@ import (
"math/rand" "math/rand"
"os" "os"
"runtime" "runtime"
"slices"
"strings" "strings"
"sync" "sync"
"testing" "testing"
@@ -1084,10 +1085,15 @@ func TestLoadFixed(t *testing.T) {
// So GMT+1 corresponds to -3600 in the Go zone, not +3600. // So GMT+1 corresponds to -3600 in the Go zone, not +3600.
name, offset := Now().In(loc).Zone() name, offset := Now().In(loc).Zone()
// The zone abbreviation is "-01" since tzdata-2016g, and "GMT+1" // The zone abbreviation is "-01" since tzdata-2016g, and "GMT+1"
// on earlier versions; we accept both. (Issue #17276). // on earlier versions; we accept both. (Issue 17276.)
if !(name == "GMT+1" || name == "-01") || offset != -1*60*60 { wantName := []string{"GMT+1", "-01"}
t.Errorf("Now().In(loc).Zone() = %q, %d, want %q or %q, %d", // The zone abbreviation may be "+01" on OpenBSD. (Issue 69840.)
name, offset, "GMT+1", "-01", -1*60*60) if runtime.GOOS == "openbsd" {
wantName = append(wantName, "+01")
}
if !slices.Contains(wantName, name) || offset != -1*60*60 {
t.Errorf("Now().In(loc).Zone() = %q, %d, want %q (one of), %d",
name, offset, wantName, -1*60*60)
} }
} }