diff --git a/VERSION b/VERSION
index 66e6565b..139c590e 100644
--- a/VERSION
+++ b/VERSION
@@ -1,2 +1,2 @@
-go1.23.3
-time 2024-11-06T18:46:45Z
+go1.23.4
+time 2024-11-27T20:27:20Z
diff --git a/src/cmd/cgo/internal/testcarchive/carchive_test.go b/src/cmd/cgo/internal/testcarchive/carchive_test.go
index a8eebead..c263b82d 100644
--- a/src/cmd/cgo/internal/testcarchive/carchive_test.go
+++ b/src/cmd/cgo/internal/testcarchive/carchive_test.go
@@ -33,7 +33,7 @@ import (
 	"unicode"
 )
 
-var globalSkip = func(t *testing.T) {}
+var globalSkip = func(t testing.TB) {}
 
 // Program to run.
 var bin []string
@@ -59,12 +59,12 @@ func TestMain(m *testing.M) {
 func testMain(m *testing.M) int {
 	if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
-		globalSkip = func(t *testing.T) { t.Skip("short mode and $GO_BUILDER_NAME not set") }
+		globalSkip = func(t testing.TB) { t.Skip("short mode and $GO_BUILDER_NAME not set") }
 		return m.Run()
 	}
 	if runtime.GOOS == "linux" {
 		if _, err := os.Stat("/etc/alpine-release"); err == nil {
-			globalSkip = func(t *testing.T) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") }
+			globalSkip = func(t testing.TB) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") }
			return m.Run()
 		}
 	}
@@ -1291,8 +1291,8 @@ func TestPreemption(t *testing.T) {
 	}
 }
 
-// Issue 59294. Test calling Go function from C after using some
-// stack space.
+// Issue 59294 and 68285. Test calling Go function from C with
+// various amounts of stack space.
 func TestDeepStack(t *testing.T) {
 	globalSkip(t)
 	testenv.MustHaveGoBuild(t)
@@ -1350,6 +1350,53 @@ func TestDeepStack(t *testing.T) {
 	}
 }
 
+func BenchmarkCgoCallbackMainThread(b *testing.B) {
+	// Benchmark for calling into Go from the C main thread.
+	// See issue #68587.
+	//
+	// It uses a subprocess, which is a C binary that calls
+	// Go on the main thread b.N times. There is some overhead
+	// for launching the subprocess. It is probably fine when
+	// b.N is large.
+
+	globalSkip(b)
+	testenv.MustHaveGoBuild(b)
+	testenv.MustHaveCGO(b)
+	testenv.MustHaveBuildMode(b, "c-archive")
+
+	if !testWork {
+		defer func() {
+			os.Remove("testp10" + exeSuffix)
+			os.Remove("libgo10.a")
+			os.Remove("libgo10.h")
+		}()
+	}
+
+	cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo10.a", "./libgo10")
+	out, err := cmd.CombinedOutput()
+	b.Logf("%v\n%s", cmd.Args, out)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	ccArgs := append(cc, "-o", "testp10"+exeSuffix, "main10.c", "libgo10.a")
+	out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+	b.Logf("%v\n%s", ccArgs, out)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	argv := cmdToRun("./testp10")
+	argv = append(argv, fmt.Sprint(b.N))
+	cmd = exec.Command(argv[0], argv[1:]...)
+
+	b.ResetTimer()
+	err = cmd.Run()
+	if err != nil {
+		b.Fatal(err)
+	}
+}
+
 func TestSharedObject(t *testing.T) {
 	// Test that we can put a Go c-archive into a C shared object.
 	globalSkip(t)
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go b/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go
new file mode 100644
index 00000000..803a0fa5
--- /dev/null
+++ b/src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go
@@ -0,0 +1,12 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "C"
+
+//export GoF
+func GoF() {}
+
+func main() {}
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go b/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go
index acb08d90..3528bef6 100644
--- a/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go
+++ b/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go
@@ -6,9 +6,29 @@ package main
 
 import "runtime"
 
+// extern void callGoWithVariousStack(int);
 import "C"
 
 func main() {}
 
 //export GoF
-func GoF() { runtime.GC() }
+func GoF(p int32) {
+	runtime.GC()
+	if p != 0 {
+		panic("panic")
+	}
+}
+
+//export callGoWithVariousStackAndGoFrame
+func callGoWithVariousStackAndGoFrame(p int32) {
+	if p != 0 {
+		defer func() {
+			e := recover()
+			if e == nil {
+				panic("did not panic")
+			}
+			runtime.GC()
+		}()
+	}
+	C.callGoWithVariousStack(C.int(p));
+}
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/main10.c b/src/cmd/cgo/internal/testcarchive/testdata/main10.c
new file mode 100644
index 00000000..53c3c83a
--- /dev/null
+++ b/src/cmd/cgo/internal/testcarchive/testdata/main10.c
@@ -0,0 +1,22 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "libgo10.h"
+
+int main(int argc, char **argv) {
+	int n, i;
+
+	if (argc != 2) {
+		perror("wrong arg");
+		return 2;
+	}
+	n = atoi(argv[1]);
+	for (i = 0; i < n; i++)
+		GoF();
+
+	return 0;
+}
diff --git a/src/cmd/cgo/internal/testcarchive/testdata/main9.c b/src/cmd/cgo/internal/testcarchive/testdata/main9.c
index 95ad4dea..e641d8a8 100644
--- a/src/cmd/cgo/internal/testcarchive/testdata/main9.c
+++ b/src/cmd/cgo/internal/testcarchive/testdata/main9.c
@@ -6,19 +6,27 @@ void use(int *x) { (*x)++; }
 
-void callGoFWithDeepStack() {
+void callGoFWithDeepStack(int p) {
 	int x[10000];
 
 	use(&x[0]);
 	use(&x[9999]);
 
-	GoF();
+	GoF(p);
 
 	use(&x[0]);
 	use(&x[9999]);
 }
 
-int main() {
-	GoF(); // call GoF without using much stack
-	callGoFWithDeepStack(); // call GoF with a deep stack
+void callGoWithVariousStack(int p) {
+	GoF(0);                  // call GoF without using much stack
+	callGoFWithDeepStack(p); // call GoF with a deep stack
+	GoF(0);                  // again on a shallow stack
+}
+
+int main() {
+	callGoWithVariousStack(0);
+
+	callGoWithVariousStackAndGoFrame(0); // normal execution
+	callGoWithVariousStackAndGoFrame(1); // panic and recover
 }
diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go
index ef17bc48..32f5a771 100644
--- a/src/cmd/compile/internal/escape/solve.go
+++ b/src/cmd/compile/internal/escape/solve.go
@@ -318,9 +318,10 @@ func containsClosure(f, c *ir.Func) bool {
 		return false
 	}
 
-	// Closures within function Foo are named like "Foo.funcN..." or "Foo-rangeN".
-	// TODO(mdempsky): Better way to recognize this.
-	fn := f.Sym().Name
-	cn := c.Sym().Name
-	return len(cn) > len(fn) && cn[:len(fn)] == fn && (cn[len(fn)] == '.'
|| cn[len(fn)] == '-') + for p := c.ClosureParent; p != nil; p = p.ClosureParent { + if p == f { + return true + } + } + return false } diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go index 7fe4445d..ffeddea0 100644 --- a/src/cmd/compile/internal/importer/gcimporter_test.go +++ b/src/cmd/compile/internal/importer/gcimporter_test.go @@ -582,6 +582,23 @@ func TestIssue25596(t *testing.T) { compileAndImportPkg(t, "issue25596") } +func TestIssue70394(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + pkg := compileAndImportPkg(t, "alias") + obj := lookupObj(t, pkg.Scope(), "A") + + typ := obj.Type() + if _, ok := typ.(*types2.Alias); !ok { + t.Fatalf("type of %s is %s, wanted an alias", obj, typ) + } +} + func importPkg(t *testing.T, path, srcDir string) *types2.Package { pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil) if err != nil { diff --git a/src/cmd/compile/internal/importer/testdata/alias.go b/src/cmd/compile/internal/importer/testdata/alias.go new file mode 100644 index 00000000..51492fc9 --- /dev/null +++ b/src/cmd/compile/internal/importer/testdata/alias.go @@ -0,0 +1,7 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testdata + +type A = int32 diff --git a/src/cmd/compile/internal/importer/ureader.go b/src/cmd/compile/internal/importer/ureader.go index 7eda375b..9d267e6d 100644 --- a/src/cmd/compile/internal/importer/ureader.go +++ b/src/cmd/compile/internal/importer/ureader.go @@ -29,11 +29,9 @@ func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input pr := pkgReader{ PkgDecoder: input, - ctxt: ctxt, - imports: imports, - // Currently, the compiler panics when using Alias types. - // TODO(gri) set to true once this is fixed (issue #66873) - enableAlias: false, + ctxt: ctxt, + imports: imports, + enableAlias: true, posBases: make([]*syntax.PosBase, input.NumElems(pkgbits.RelocPosBase)), pkgs: make([]*types2.Package, input.NumElems(pkgbits.RelocPkg)), diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index d0c8ee35..4fa9055b 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -51,6 +51,8 @@ import ( // the generated ODCLFUNC, but there is no // pointer from the Func back to the OMETHVALUE. type Func struct { + // if you add or remove a field, don't forget to update sizeof_test.go + miniNode Body Nodes @@ -76,6 +78,9 @@ type Func struct { // Populated during walk. Closures []*Func + // Parent of a closure + ClosureParent *Func + // Parents records the parent scope of each scope within a // function. The root scope (0) has no parent, so the i'th // scope's parent is stored at Parents[i-1]. 
@@ -512,6 +517,7 @@ func NewClosureFunc(fpos, cpos src.XPos, why Op, typ *types.Type, outerfn *Func, fn.Nname.Defn = fn pkg.Funcs = append(pkg.Funcs, fn) + fn.ClosureParent = outerfn return fn } diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go index 68d28655..6331cceb 100644 --- a/src/cmd/compile/internal/ir/sizeof_test.go +++ b/src/cmd/compile/internal/ir/sizeof_test.go @@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 176, 296}, + {Func{}, 180, 304}, {Name{}, 96, 168}, } diff --git a/src/cmd/compile/internal/rangefunc/rangefunc_test.go b/src/cmd/compile/internal/rangefunc/rangefunc_test.go index 97ab2543..e488c3cf 100644 --- a/src/cmd/compile/internal/rangefunc/rangefunc_test.go +++ b/src/cmd/compile/internal/rangefunc/rangefunc_test.go @@ -2099,3 +2099,27 @@ func TestTwoLevelReturnCheck(t *testing.T) { t.Errorf("Expected y=3, got y=%d\n", y) } } + +func Bug70035(s1, s2, s3 []string) string { + var c1 string + for v1 := range slices.Values(s1) { + var c2 string + for v2 := range slices.Values(s2) { + var c3 string + for v3 := range slices.Values(s3) { + c3 = c3 + v3 + } + c2 = c2 + v2 + c3 + } + c1 = c1 + v1 + c2 + } + return c1 +} + +func Test70035(t *testing.T) { + got := Bug70035([]string{"1", "2", "3"}, []string{"a", "b", "c"}, []string{"A", "B", "C"}) + want := "1aABCbABCcABC2aABCbABCcABC3aABCbABCcABC" + if got != want { + t.Errorf("got %v, want %v", got, want) + } +} diff --git a/src/cmd/trace/gstate.go b/src/cmd/trace/gstate.go index 638d4926..4b380db9 100644 --- a/src/cmd/trace/gstate.go +++ b/src/cmd/trace/gstate.go @@ -257,6 +257,10 @@ func (gs *gState[R]) stop(ts trace.Time, stack trace.Stack, ctx *traceContext) { if gs.lastStopStack != trace.NoStack { stk = ctx.Stack(viewerFrames(gs.lastStopStack)) } + var endStk int + if stack != trace.NoStack { + endStk = ctx.Stack(viewerFrames(stack)) + } // Check invariants. if gs.startRunningTime == 0 { panic("silently broken trace or generator invariant (startRunningTime != 0) not held") @@ -270,6 +274,7 @@ func (gs *gState[R]) stop(ts trace.Time, stack trace.Stack, ctx *traceContext) { Dur: ts.Sub(gs.startRunningTime), Resource: uint64(gs.executing), Stack: stk, + EndStack: endStk, }) // Flush completed ranges. 
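As context for the escape-analysis and ir changes above: containsClosure no longer guesses closure nesting from the "Foo.funcN..." name prefix; it walks the ClosureParent links that NewClosureFunc now records. A minimal standalone sketch of the same walk, using a hypothetical fn type in place of the compiler's ir.Func (not the compiler's actual code):

	package main

	import "fmt"

	// fn stands in for ir.Func; parent mirrors the new Func.ClosureParent link.
	type fn struct {
		name   string
		parent *fn
	}

	// containsClosure reports whether c is nested, directly or transitively,
	// within f, by walking the parent chain instead of comparing names.
	func containsClosure(f, c *fn) bool {
		for p := c.parent; p != nil; p = p.parent {
			if p == f {
				return true
			}
		}
		return false
	}

	func main() {
		outer := &fn{name: "Foo"}
		mid := &fn{name: "Foo.func1", parent: outer}
		inner := &fn{name: "Foo.func1.1", parent: mid}
		fmt.Println(containsClosure(outer, inner)) // true
		fmt.Println(containsClosure(inner, outer)) // false
	}

Unlike the old string comparison, this cannot be fooled by coincidentally similar function names, which is what issue 70035's nested range-over-func closures exercised.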
diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go index 341e07ca..d1023d4e 100644 --- a/src/internal/poll/sendfile_bsd.go +++ b/src/internal/poll/sendfile_bsd.go @@ -72,6 +72,6 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, if err == syscall.EAGAIN { err = nil } - handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) + handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL && err != syscall.EOPNOTSUPP && err != syscall.ENOTSUP) return } diff --git a/src/net/sendfile_unix_alt.go b/src/net/sendfile_unix_alt.go index 9e46c4e6..4056856f 100644 --- a/src/net/sendfile_unix_alt.go +++ b/src/net/sendfile_unix_alt.go @@ -53,6 +53,9 @@ func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) { if err != nil { return 0, err, false } + if fi.Mode()&(fs.ModeSymlink|fs.ModeDevice|fs.ModeCharDevice|fs.ModeIrregular) != 0 { + return 0, nil, false + } remain = fi.Size() } diff --git a/src/net/sendfile_unix_test.go b/src/net/sendfile_unix_test.go new file mode 100644 index 00000000..79fb23b3 --- /dev/null +++ b/src/net/sendfile_unix_test.go @@ -0,0 +1,86 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package net + +import ( + "internal/testpty" + "io" + "os" + "sync" + "syscall" + "testing" +) + +// Issue 70763: test that we don't fail on sendfile from a tty. +func TestCopyFromTTY(t *testing.T) { + pty, ttyName, err := testpty.Open() + if err != nil { + t.Skipf("skipping test because pty open failed: %v", err) + } + defer pty.Close() + + // Use syscall.Open so that the tty is blocking. + ttyFD, err := syscall.Open(ttyName, syscall.O_RDWR, 0) + if err != nil { + t.Skipf("skipping test because tty open failed: %v", err) + } + defer syscall.Close(ttyFD) + + tty := os.NewFile(uintptr(ttyFD), "tty") + defer tty.Close() + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + ch := make(chan bool) + + const data = "data\n" + + var wg sync.WaitGroup + defer wg.Wait() + + wg.Add(1) + go func() { + defer wg.Done() + conn, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + defer conn.Close() + + buf := make([]byte, len(data)) + if _, err := io.ReadFull(conn, buf); err != nil { + t.Error(err) + } + + ch <- true + }() + + conn, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + wg.Add(1) + go func() { + defer wg.Done() + if _, err := pty.Write([]byte(data)); err != nil { + t.Error(err) + } + <-ch + if err := pty.Close(); err != nil { + t.Error(err) + } + }() + + lr := io.LimitReader(tty, int64(len(data))) + if _, err := io.Copy(conn, lr); err != nil { + t.Error(err) + } +} diff --git a/src/runtime/cgo/gcc_stack_unix.c b/src/runtime/cgo/gcc_stack_unix.c index fcb03d0d..df0049a4 100644 --- a/src/runtime/cgo/gcc_stack_unix.c +++ b/src/runtime/cgo/gcc_stack_unix.c @@ -31,10 +31,11 @@ x_cgo_getstackbound(uintptr bounds[2]) pthread_attr_get_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); // low address #else - // We don't know how to get the current stacks, so assume they are the - // same as the default stack bounds. - pthread_attr_getstacksize(&attr, &size); - addr = __builtin_frame_address(0) + 4096 - size; + // We don't know how to get the current stacks, leave it as + // 0 and the caller will use an estimate based on the current + // SP. 
+	addr = 0;
+	size = 0;
 #endif
 	pthread_attr_destroy(&attr);
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index 68b1ebba..972de4fe 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -231,34 +231,6 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
 func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
 	g0 := mp.g0
 
-	inBound := sp > g0.stack.lo && sp <= g0.stack.hi
-	if mp.ncgo > 0 && !inBound {
-		// ncgo > 0 indicates that this M was in Go further up the stack
-		// (it called C and is now receiving a callback).
-		//
-		// !inBound indicates that we were called with SP outside the
-		// expected system stack bounds (C changed the stack out from
-		// under us between the cgocall and cgocallback?).
-		//
-		// It is not safe for the C call to change the stack out from
-		// under us, so throw.
-
-		// Note that this case isn't possible for signal == true, as
-		// that is always passing a new M from needm.
-
-		// Stack is bogus, but reset the bounds anyway so we can print.
-		hi := g0.stack.hi
-		lo := g0.stack.lo
-		g0.stack.hi = sp + 1024
-		g0.stack.lo = sp - 32*1024
-		g0.stackguard0 = g0.stack.lo + stackGuard
-		g0.stackguard1 = g0.stackguard0
-
-		print("M ", mp.id, " procid ", mp.procid, " runtime: cgocallback with sp=", hex(sp), " out of bounds [", hex(lo), ", ", hex(hi), "]")
-		print("\n")
-		exit(2)
-	}
-
 	if !mp.isextra {
 		// We allocated the stack for standard Ms. Don't replace the
 		// stack bounds with estimated ones when we already initialized
@@ -266,26 +238,37 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
 		return
 	}
 
-	// This M does not have Go further up the stack. However, it may have
-	// previously called into Go, initializing the stack bounds. Between
-	// that call returning and now the stack may have changed (perhaps the
-	// C thread is running a coroutine library). We need to update the
-	// stack bounds for this case.
+	inBound := sp > g0.stack.lo && sp <= g0.stack.hi
+	if inBound && mp.g0StackAccurate {
+		// This M has called into Go before and has the stack bounds
+		// initialized. We have the accurate stack bounds, and the SP
+		// is in bounds. We expect it to continue running within the same
+		// bounds.
+		return
+	}
+
+	// We don't have accurate stack bounds (either this M has never called
+	// into Go before, or we couldn't get the accurate bounds), or the
+	// current SP is not within the previous bounds (the stack may have
+	// changed between calls). We need to update the stack bounds.
 	//
 	// N.B. we need to update the stack bounds even if SP appears to
-	// already be in bounds. Our "bounds" may actually be estimated dummy
-	// bounds (below). The actual stack bounds could have shifted but still
-	// have partial overlap with our dummy bounds. If we failed to update
-	// in that case, we could find ourselves seemingly called near the
-	// bottom of the stack bounds, where we quickly run out of space.
+	// already be in bounds, if our bounds are estimated dummy bounds
+	// (below). We may be in a different region within the same actual
+	// stack bounds, but our estimates were not accurate. Or the actual
+	// stack bounds could have shifted but still have partial overlap with
+	// our dummy bounds. If we failed to update in that case, we could find
+	// ourselves seemingly called near the bottom of the stack bounds, where
+	// we quickly run out of space.
 
 	// Set the stack bounds to match the current stack. If we don't
 	// actually know how big the stack is, like we don't know how big any
 	// scheduling stack is, but we assume there's at least 32 kB. If we
 	// can get a more accurate stack bound from pthread, use that, provided
-	// it actually contains SP..
+	// it actually contains SP.
 	g0.stack.hi = sp + 1024
 	g0.stack.lo = sp - 32*1024
+	mp.g0StackAccurate = false
 	if !signal && _cgo_getstackbound != nil {
 		// Don't adjust if called from the signal handler.
 		// We are on the signal stack, not the pthread stack.
@@ -296,12 +279,16 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
 		asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
 		// getstackbound is an unsupported no-op on Windows.
 		//
+		// On Unix systems, if the API to get accurate stack bounds is
+		// not available, it returns zeros.
+		//
 		// Don't use these bounds if they don't contain SP. Perhaps we
 		// were called by something not using the standard thread
 		// stack.
 		if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] {
 			g0.stack.lo = bounds[0]
 			g0.stack.hi = bounds[1]
+			mp.g0StackAccurate = true
 		}
 	}
 	g0.stackguard0 = g0.stack.lo + stackGuard
@@ -319,6 +306,8 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
 	}
 
 	sp := gp.m.g0.sched.sp // system sp saved by cgocallback.
+	oldStack := gp.m.g0.stack
+	oldAccurate := gp.m.g0StackAccurate
 	callbackUpdateSystemStack(gp.m, sp, false)
 
 	// The call from C is on gp.m's g0 stack, so we must ensure
@@ -380,6 +369,12 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
 	reentersyscall(savedpc, uintptr(savedsp), uintptr(savedbp))
 
 	gp.m.winsyscall = winsyscall
+
+	// Restore the old g0 stack bounds
+	gp.m.g0.stack = oldStack
+	gp.m.g0.stackguard0 = oldStack.lo + stackGuard
+	gp.m.g0.stackguard1 = gp.m.g0.stackguard0
+	gp.m.g0StackAccurate = oldAccurate
 }
 
 func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index d55da102..4502fa72 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -1886,3 +1886,30 @@ func (m *TraceMap) PutString(s string) (uint64, bool) {
 func (m *TraceMap) Reset() {
 	m.traceMap.reset()
 }
+
+func SetSpinInGCMarkDone(spin bool) {
+	gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
+}
+
+func GCMarkDoneRestarted() bool {
+	// Only read this outside of the GC. If we're running during a GC, just report false.
+	mp := acquirem()
+	if gcphase != _GCoff {
+		releasem(mp)
+		return false
+	}
+	restarted := gcDebugMarkDone.restartedDueTo27993
+	releasem(mp)
+	return restarted
+}
+
+func GCMarkDoneResetRestartFlag() {
+	mp := acquirem()
+	for gcphase != _GCoff {
+		releasem(mp)
+		Gosched()
+		mp = acquirem()
+	}
+	gcDebugMarkDone.restartedDueTo27993 = false
+	releasem(mp)
+}
diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go
index 908f6322..4b92b200 100644
--- a/src/runtime/gc_test.go
+++ b/src/runtime/gc_test.go
@@ -6,6 +6,8 @@ package runtime_test
 
 import (
 	"fmt"
+	"internal/testenv"
+	"internal/weak"
 	"math/bits"
 	"math/rand"
 	"os"
@@ -787,3 +789,78 @@ func TestMemoryLimitNoGCPercent(t *testing.T) {
 func TestMyGenericFunc(t *testing.T) {
 	runtime.MyGenericFunc[int]()
 }
+
+func TestWeakToStrongMarkTermination(t *testing.T) {
+	testenv.MustHaveParallelism(t)
+
+	type T struct {
+		a *int
+		b int
+	}
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
+	defer debug.SetGCPercent(debug.SetGCPercent(-1))
+	w := make([]weak.Pointer[T], 2048)
+
+	// Make sure there's no outstanding GC from a previous test.
+	runtime.GC()
+
+	// Create many objects with weak pointers to them.
+	for i := range w {
+		x := new(T)
+		x.a = new(int)
+		w[i] = weak.Make(x)
+	}
+
+	// Reset the restart flag.
+	runtime.GCMarkDoneResetRestartFlag()
+
+	// Prevent mark termination from completing.
+	runtime.SetSpinInGCMarkDone(true)
+
+	// Start a GC, and wait a little bit to get something spinning in mark termination.
+	// Simultaneously, fire off another goroutine to disable spinning. If everything's
+	// working correctly, then weak.Strong will block, so we need to make sure something
+	// prevents the GC from continuing to spin.
+	done := make(chan struct{})
+	go func() {
+		runtime.GC()
+		done <- struct{}{}
+	}()
+	go func() {
+		time.Sleep(100 * time.Millisecond)
+
+		// Let mark termination continue.
+		runtime.SetSpinInGCMarkDone(false)
+	}()
+	time.Sleep(10 * time.Millisecond)
+
+	// Perform many weak->strong conversions in the critical window.
+	var wg sync.WaitGroup
+	for _, wp := range w {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			wp.Strong()
+		}()
+	}
+
+	// Make sure the GC completes.
+	<-done
+
+	// Make sure all the weak->strong conversions finish.
+	wg.Wait()
+
+	// The bug is triggered if there's still mark work after gcMarkDone stops the world.
+	//
+	// This can manifest in one of two ways today:
+	// - An exceedingly rare crash in mark termination.
+	// - gcMarkDone restarts, as if issue #27993 is at play.
+	//
+	// Check for the latter. This is a fairly controlled environment, so #27993 is very
+	// unlikely to happen (it's already rare to begin with) but we'll always _appear_ to
+	// trigger the same bug if weak->strong conversions aren't properly coordinated with
+	// mark termination.
+	if runtime.GCMarkDoneRestarted() {
+		t.Errorf("gcMarkDone restarted")
+	}
+}
diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go
index 432ace72..37383833 100644
--- a/src/runtime/lockrank.go
+++ b/src/runtime/lockrank.go
@@ -17,6 +17,7 @@ const (
 	lockRankDefer
 	lockRankSweepWaiters
 	lockRankAssistQueue
+	lockRankStrongFromWeakQueue
 	lockRankSweep
 	lockRankTestR
 	lockRankTestW
@@ -84,64 +85,65 @@ const lockRankLeafRank lockRank = 1000
 
 // lockNames gives the names associated with each of the above ranks.
var lockNames = []string{ - lockRankSysmon: "sysmon", - lockRankScavenge: "scavenge", - lockRankForcegc: "forcegc", - lockRankDefer: "defer", - lockRankSweepWaiters: "sweepWaiters", - lockRankAssistQueue: "assistQueue", - lockRankSweep: "sweep", - lockRankTestR: "testR", - lockRankTestW: "testW", - lockRankTimerSend: "timerSend", - lockRankAllocmW: "allocmW", - lockRankExecW: "execW", - lockRankCpuprof: "cpuprof", - lockRankPollCache: "pollCache", - lockRankPollDesc: "pollDesc", - lockRankWakeableSleep: "wakeableSleep", - lockRankHchan: "hchan", - lockRankAllocmR: "allocmR", - lockRankExecR: "execR", - lockRankSched: "sched", - lockRankAllg: "allg", - lockRankAllp: "allp", - lockRankNotifyList: "notifyList", - lockRankSudog: "sudog", - lockRankTimers: "timers", - lockRankTimer: "timer", - lockRankNetpollInit: "netpollInit", - lockRankRoot: "root", - lockRankItab: "itab", - lockRankReflectOffs: "reflectOffs", - lockRankUserArenaState: "userArenaState", - lockRankTraceBuf: "traceBuf", - lockRankTraceStrings: "traceStrings", - lockRankFin: "fin", - lockRankSpanSetSpine: "spanSetSpine", - lockRankMspanSpecial: "mspanSpecial", - lockRankTraceTypeTab: "traceTypeTab", - lockRankGcBitsArenas: "gcBitsArenas", - lockRankProfInsert: "profInsert", - lockRankProfBlock: "profBlock", - lockRankProfMemActive: "profMemActive", - lockRankProfMemFuture: "profMemFuture", - lockRankGscan: "gscan", - lockRankStackpool: "stackpool", - lockRankStackLarge: "stackLarge", - lockRankHchanLeaf: "hchanLeaf", - lockRankWbufSpans: "wbufSpans", - lockRankMheap: "mheap", - lockRankMheapSpecial: "mheapSpecial", - lockRankGlobalAlloc: "globalAlloc", - lockRankTrace: "trace", - lockRankTraceStackTab: "traceStackTab", - lockRankPanic: "panic", - lockRankDeadlock: "deadlock", - lockRankRaceFini: "raceFini", - lockRankAllocmRInternal: "allocmRInternal", - lockRankExecRInternal: "execRInternal", - lockRankTestRInternal: "testRInternal", + lockRankSysmon: "sysmon", + lockRankScavenge: "scavenge", + lockRankForcegc: "forcegc", + lockRankDefer: "defer", + lockRankSweepWaiters: "sweepWaiters", + lockRankAssistQueue: "assistQueue", + lockRankStrongFromWeakQueue: "strongFromWeakQueue", + lockRankSweep: "sweep", + lockRankTestR: "testR", + lockRankTestW: "testW", + lockRankTimerSend: "timerSend", + lockRankAllocmW: "allocmW", + lockRankExecW: "execW", + lockRankCpuprof: "cpuprof", + lockRankPollCache: "pollCache", + lockRankPollDesc: "pollDesc", + lockRankWakeableSleep: "wakeableSleep", + lockRankHchan: "hchan", + lockRankAllocmR: "allocmR", + lockRankExecR: "execR", + lockRankSched: "sched", + lockRankAllg: "allg", + lockRankAllp: "allp", + lockRankNotifyList: "notifyList", + lockRankSudog: "sudog", + lockRankTimers: "timers", + lockRankTimer: "timer", + lockRankNetpollInit: "netpollInit", + lockRankRoot: "root", + lockRankItab: "itab", + lockRankReflectOffs: "reflectOffs", + lockRankUserArenaState: "userArenaState", + lockRankTraceBuf: "traceBuf", + lockRankTraceStrings: "traceStrings", + lockRankFin: "fin", + lockRankSpanSetSpine: "spanSetSpine", + lockRankMspanSpecial: "mspanSpecial", + lockRankTraceTypeTab: "traceTypeTab", + lockRankGcBitsArenas: "gcBitsArenas", + lockRankProfInsert: "profInsert", + lockRankProfBlock: "profBlock", + lockRankProfMemActive: "profMemActive", + lockRankProfMemFuture: "profMemFuture", + lockRankGscan: "gscan", + lockRankStackpool: "stackpool", + lockRankStackLarge: "stackLarge", + lockRankHchanLeaf: "hchanLeaf", + lockRankWbufSpans: "wbufSpans", + lockRankMheap: "mheap", + lockRankMheapSpecial: 
"mheapSpecial", + lockRankGlobalAlloc: "globalAlloc", + lockRankTrace: "trace", + lockRankTraceStackTab: "traceStackTab", + lockRankPanic: "panic", + lockRankDeadlock: "deadlock", + lockRankRaceFini: "raceFini", + lockRankAllocmRInternal: "allocmRInternal", + lockRankExecRInternal: "execRInternal", + lockRankTestRInternal: "testRInternal", } func (rank lockRank) String() string { @@ -163,62 +165,63 @@ func (rank lockRank) String() string { // // Lock ranks that allow self-cycles list themselves. var lockPartialOrder [][]lockRank = [][]lockRank{ - lockRankSysmon: {}, - lockRankScavenge: {lockRankSysmon}, - lockRankForcegc: {lockRankSysmon}, - lockRankDefer: {}, - lockRankSweepWaiters: {}, - lockRankAssistQueue: {}, - lockRankSweep: {}, - lockRankTestR: {}, - lockRankTestW: {}, - lockRankTimerSend: {}, - lockRankAllocmW: {}, - lockRankExecW: {}, - lockRankCpuprof: {}, - lockRankPollCache: {}, - lockRankPollDesc: {}, - lockRankWakeableSleep: {}, - lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan}, - lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, - lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, - lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, - lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, - lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, - lockRankNotifyList: {}, - lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, - lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, - lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, - lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer}, - lockRankRoot: {}, - lockRankItab: {}, - lockRankReflectOffs: {lockRankItab}, - lockRankUserArenaState: {}, - lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, - lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, - lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, 
lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, - lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, 
lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, - lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, - lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, - lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, 
lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, - lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, - lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, 
lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, - lockRankPanic: {}, - lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, - lockRankRaceFini: {lockRankPanic}, - lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR}, - lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR}, - lockRankTestRInternal: {lockRankTestR, lockRankTestW}, + lockRankSysmon: {}, + lockRankScavenge: {lockRankSysmon}, + lockRankForcegc: {lockRankSysmon}, + lockRankDefer: {}, + lockRankSweepWaiters: {}, + lockRankAssistQueue: {}, + lockRankStrongFromWeakQueue: {}, + lockRankSweep: {}, + lockRankTestR: {}, + lockRankTestW: {}, + lockRankTimerSend: {}, + lockRankAllocmW: {}, + lockRankExecW: {}, + lockRankCpuprof: {}, + lockRankPollCache: {}, + lockRankPollDesc: {}, + lockRankWakeableSleep: {}, + lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan}, + lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, + lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, + lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, + lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, 
lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankNotifyList: {}, + lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, + lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, + lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, + lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer}, + lockRankRoot: {}, + lockRankItab: {}, + lockRankReflectOffs: {lockRankItab}, + lockRankUserArenaState: {}, + lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, + lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, + lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, 
lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, + lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, + lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, + lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, 
lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
+	lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
+	lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
+	lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
+	lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
+	lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
+	lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial},
+	lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
+	lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
+	lockRankPanic: {},
+	lockRankDeadlock: {lockRankPanic, lockRankDeadlock},
+	lockRankRaceFini: {lockRankPanic},
+	lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR},
+	lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR},
+	lockRankTestRInternal: {lockRankTestR, lockRankTestW},
 }
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 2654c696..f72edc2a 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -190,6 +190,7 @@ func gcinit() {
 	work.markDoneSema = 1
 	lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
 	lockInit(&work.assistQueue.lock, lockRankAssistQueue)
+	lockInit(&work.strongFromWeak.lock, lockRankStrongFromWeakQueue)
 	lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
 }
@@ -418,6 +419,26 @@ type workType struct {
 		list gList
 	}
+	// strongFromWeak controls how the GC interacts with weak->strong
+	// pointer conversions.
+	strongFromWeak struct {
+		// block is a flag set during mark termination that prevents
+		// new weak->strong conversions from executing by blocking the
+		// goroutine and enqueuing it onto q.
+		//
+		// Mutated only by one goroutine at a time in gcMarkDone,
+		// with globally-synchronizing events like forEachP and
+		// stopTheWorld.
+		block bool
+
+		// q is a queue of goroutines that attempted to perform a
+		// weak->strong conversion during mark termination.
+		//
+		// Protected by lock.
+		lock mutex
+		q    gQueue
+	}
+
 	// cycles is the number of completed GC cycles, where a GC
 	// cycle is sweep termination, mark, mark termination, and
 	// sweep. This differs from memstats.numgc, which is
@@ -800,6 +821,19 @@ func gcStart(trigger gcTrigger) {
 // This is protected by markDoneSema.
 var gcMarkDoneFlushed uint32
+// gcDebugMarkDone contains fields used to debug/test mark termination.
+var gcDebugMarkDone struct {
+	// spinAfterRaggedBarrier forces gcMarkDone to spin after it executes
+	// the ragged barrier.
+	spinAfterRaggedBarrier atomic.Bool
+
+	// restartedDueTo27993 indicates that we restarted mark termination
+	// due to the bug described in issue #27993.
+	//
+	// Protected by worldsema.
+	restartedDueTo27993 bool
+}
+
 // gcMarkDone transitions the GC from mark to mark termination if all
 // reachable objects have been marked (that is, there are no grey
 // objects and can be no more in the future). Otherwise, it flushes
@@ -842,6 +876,10 @@ top:
 	// stop the world later, so acquire worldsema now.
 	semacquire(&worldsema)
+	// Prevent weak->strong conversions from generating additional
+	// GC work. forEachP will guarantee that it is observed globally.
+	work.strongFromWeak.block = true
+
 	// Flush all local buffers and collect flushedWork flags.
 	gcMarkDoneFlushed = 0
 	forEachP(waitReasonGCMarkTermination, func(pp *p) {
@@ -872,6 +910,10 @@ top:
 		goto top
 	}
+	// For debugging/testing.
+	for gcDebugMarkDone.spinAfterRaggedBarrier.Load() {
+	}
+
 	// There was no global work, no local work, and no Ps
 	// communicated work since we took markDoneSema. Therefore
 	// there are no grey objects and no more objects can be
@@ -910,6 +952,8 @@ top:
 		}
 	})
 	if restart {
+		gcDebugMarkDone.restartedDueTo27993 = true
+
 		getg().m.preemptoff = ""
 		systemstack(func() {
 			// Accumulate the time we were stopped before we had to start again.
@@ -936,6 +980,11 @@ top:
 	// start the world again.
 	gcWakeAllAssists()
+	// Wake all blocked weak->strong conversions. These will run
+	// when we start the world again.
+	work.strongFromWeak.block = false
+	gcWakeAllStrongFromWeak()
+
 	// Likewise, release the transition lock. Blocked
 	// workers and assists will run when we start the
 	// world again.
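The mgc.go hunk above is one side of a simple handshake: gcMarkDone raises work.strongFromWeak.block once it holds worldsema, any weak->strong conversion arriving while the flag is up parks itself on work.strongFromWeak.q, and the flag is cleared before the queue is drained so that freshly woken goroutines cannot immediately re-park. The sketch below reproduces that block-flag-plus-wake-queue shape with ordinary sync primitives; the gate type and its method names are invented for illustration, and the runtime itself uses its own mutex, gQueue, gopark, and injectglist rather than sync.Cond.

package main

import (
	"fmt"
	"sync"
)

// gate stands in for work.strongFromWeak: a boolean "block" flag plus a
// set of waiters that are all released at once when the flag drops.
type gate struct {
	mu      sync.Mutex
	cond    *sync.Cond
	blocked bool
}

func newGate() *gate {
	g := &gate{}
	g.cond = sync.NewCond(&g.mu)
	return g
}

// wait parks until the gate opens, re-checking the flag after every
// wakeup the same way gcParkStrongFromWeak re-checks strongFromWeak.block.
func (g *gate) wait() {
	g.mu.Lock()
	for g.blocked {
		g.cond.Wait()
	}
	g.mu.Unlock()
}

// shut begins the critical phase (mark termination, in the runtime's case).
func (g *gate) shut() {
	g.mu.Lock()
	g.blocked = true
	g.mu.Unlock()
}

// open clears the flag before waking everyone, so no waiter re-parks.
func (g *gate) open() {
	g.mu.Lock()
	g.blocked = false
	g.cond.Broadcast()
	g.mu.Unlock()
}

func main() {
	g := newGate()
	g.shut()

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			g.wait() // a conversion arriving mid-termination blocks here
			fmt.Println("waiter", i, "resumed")
		}(i)
	}

	g.open()
	wg.Wait()
}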
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index a9105538..bfca2d10 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -2049,8 +2049,19 @@ func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer
 func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer {
 	handle := (*atomic.Uintptr)(u)
-	// Prevent preemption. We want to make sure that another GC cycle can't start.
+	// Prevent preemption. We want to make sure that another GC cycle can't start
+	// and that work.strongFromWeak.block can't change out from under us.
 	mp := acquirem()
+
+	// Yield to the GC if necessary.
+	if work.strongFromWeak.block {
+		releasem(mp)
+
+		// Try to park and wait for mark termination.
+		// N.B. gcParkStrongFromWeak calls acquirem before returning.
+		mp = gcParkStrongFromWeak()
+	}
+
 	p := handle.Load()
 	if p == 0 {
 		releasem(mp)
@@ -2092,6 +2103,41 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer {
 	return ptr
 }
+// gcParkStrongFromWeak puts the current goroutine on the weak->strong queue and parks.
+func gcParkStrongFromWeak() *m {
+	// Prevent preemption as we check strongFromWeak, so it can't change out from under us.
+	mp := acquirem()
+
+	for work.strongFromWeak.block {
+		lock(&work.strongFromWeak.lock)
+		releasem(mp) // N.B. Holding the lock prevents preemption.
+
+		// Queue ourselves up.
+		work.strongFromWeak.q.pushBack(getg())
+
+		// Park.
+		goparkunlock(&work.strongFromWeak.lock, waitReasonGCWeakToStrongWait, traceBlockGCWeakToStrongWait, 2)
+
+		// Re-acquire the current M since we're going to check the condition again.
+		mp = acquirem()
+
+		// Re-check condition. We may have awoken in the next GC's mark termination phase.
+	}
+	return mp
+}
+
+// gcWakeAllStrongFromWeak wakes all currently blocked weak->strong
+// conversions. This is used at the end of a GC cycle.
+//
+// work.strongFromWeak.block must be false to prevent woken goroutines
+// from immediately going back to sleep.
+func gcWakeAllStrongFromWeak() {
+	lock(&work.strongFromWeak.lock)
+	list := work.strongFromWeak.q.popList()
+	injectglist(&list)
+	unlock(&work.strongFromWeak.lock)
+}
+
 // Retrieves or creates a weak pointer handle for the object p.
 func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
 	// First try to retrieve without allocating.
@@ -2126,8 +2172,14 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
 		// Keep p alive for the duration of the function to ensure
 		// that it cannot die while we're trying to do this.
+		//
+		// Same for handle, which is only stored in the special.
+		// There's a window where it might die if we don't keep it
+		// alive explicitly. Returning it here is probably good enough,
+		// but let's be defensive and explicit. See #70455.
 		KeepAlive(p)
-		return s.handle
+		KeepAlive(handle)
+		return handle
 	}
 	// There was an existing handle. Free the special
@@ -2147,7 +2199,10 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
 	// Keep p alive for the duration of the function to ensure
 	// that it cannot die while we're trying to do this.
+	//
+	// Same for handle, just to be defensive.
 	KeepAlive(p)
+	KeepAlive(handle)
 	return handle
 }
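The KeepAlive(handle) additions above close a liveness window noted in #70455: handle is stored only in the special, so without an explicit KeepAlive the collector could consider it dead before the function returns. For readers unfamiliar with the pattern, here is a self-contained sketch of what runtime.KeepAlive guarantees; the File type is invented for the example and the code assumes a Unix-like system where /dev/null exists.

package main

import (
	"fmt"
	"runtime"
	"syscall"
)

// File wraps a raw descriptor whose cleanup runs when the wrapper is
// garbage collected.
type File struct{ d int }

func main() {
	d, err := syscall.Open("/dev/null", syscall.O_RDONLY, 0)
	if err != nil {
		fmt.Println(err)
		return
	}
	f := &File{d: d}
	runtime.SetFinalizer(f, func(f *File) { syscall.Close(f.d) })

	var buf [16]byte
	n, err := syscall.Read(f.d, buf[:])

	// Without this line, the last use of f is the f.d load above, so the
	// finalizer could close the descriptor while Read is still running.
	// This is the same move as KeepAlive(handle) in the hunk above: keep
	// the owning object alive until we are done with what it guards.
	runtime.KeepAlive(f)

	fmt.Println(n, err)
}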
diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go
index 1239b4a5..3391afc6 100644
--- a/src/runtime/mklockrank.go
+++ b/src/runtime/mklockrank.go
@@ -50,6 +50,7 @@ NONE < defer;
 NONE <
   sweepWaiters,
   assistQueue,
+  strongFromWeakQueue,
   sweep;
 
 # Test only
@@ -66,6 +67,7 @@ assistQueue,
   hchan,
   pollDesc, # pollDesc can interact with timers, which can lock sched.
   scavenge,
+  strongFromWeakQueue,
   sweep,
   sweepWaiters,
   testR,
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 76c8b71a..d5cfaa39 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -2539,6 +2539,7 @@ func dropm() {
 	g0.stack.lo = 0
 	g0.stackguard0 = 0
 	g0.stackguard1 = 0
+	mp.g0StackAccurate = false
 
 	putExtraM(mp)
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 4a789639..4a1ee37a 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -556,47 +556,48 @@ type m struct {
 	_ uint32 // align next field to 8 bytes
 
 	// Fields not known to debuggers.
-	procid        uint64            // for debuggers, but offset not hard-coded
-	gsignal       *g                // signal-handling g
-	goSigStack    gsignalStack      // Go-allocated signal handling stack
-	sigmask       sigset            // storage for saved signal mask
-	tls           [tlsSlots]uintptr // thread-local storage (for x86 extern register)
-	mstartfn      func()
-	curg          *g       // current running goroutine
-	caughtsig     guintptr // goroutine running during fatal signal
-	p             puintptr // attached p for executing go code (nil if not executing go code)
-	nextp         puintptr
-	oldp          puintptr // the p that was attached before executing a syscall
-	id            int64
-	mallocing     int32
-	throwing      throwType
-	preemptoff    string // if != "", keep curg running on this m
-	locks         int32
-	dying         int32
-	profilehz     int32
-	spinning      bool // m is out of work and is actively looking for work
-	blocked       bool // m is blocked on a note
-	newSigstack   bool // minit on C thread called sigaltstack
-	printlock     int8
-	incgo         bool          // m is executing a cgo call
-	isextra       bool          // m is an extra m
-	isExtraInC    bool          // m is an extra m that is not executing Go code
-	isExtraInSig  bool          // m is an extra m in a signal handler
-	freeWait      atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
-	needextram    bool
-	traceback     uint8
-	ncgocall      uint64        // number of cgo calls in total
-	ncgo          int32         // number of cgo calls currently in progress
-	cgoCallersUse atomic.Uint32 // if non-zero, cgoCallers in use temporarily
-	cgoCallers    *cgoCallers   // cgo traceback if crashing in cgo call
-	park          note
-	alllink       *m // on allm
-	schedlink     muintptr
-	lockedg       guintptr
-	createstack   [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
-	lockedExt     uint32      // tracking for external LockOSThread
-	lockedInt     uint32      // tracking for internal lockOSThread
-	nextwaitm     muintptr    // next m waiting for lock
+	procid          uint64            // for debuggers, but offset not hard-coded
+	gsignal         *g                // signal-handling g
+	goSigStack      gsignalStack      // Go-allocated signal handling stack
+	sigmask         sigset            // storage for saved signal mask
+	tls             [tlsSlots]uintptr // thread-local storage (for x86 extern register)
+	mstartfn        func()
+	curg            *g       // current running goroutine
+	caughtsig       guintptr // goroutine running during fatal signal
+	p               puintptr // attached p for executing go code (nil if not executing go code)
+	nextp           puintptr
+	oldp            puintptr // the p that was attached before executing a syscall
+	id              int64
+	mallocing       int32
+	throwing        throwType
+	preemptoff      string // if != "", keep curg running on this m
+	locks           int32
+	dying           int32
+	profilehz       int32
+	spinning        bool // m is out of work and is actively looking for work
+	blocked         bool // m is blocked on a note
+	newSigstack     bool // minit on C thread called sigaltstack
+	printlock       int8
+	incgo           bool          // m is executing a cgo call
+	isextra         bool          // m is an extra m
+	isExtraInC      bool          // m is an extra m that is not executing Go code
+	isExtraInSig    bool          // m is an extra m in a signal handler
+	freeWait        atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
+	needextram      bool
+	g0StackAccurate bool // whether the g0 stack has accurate bounds
+	traceback       uint8
+	ncgocall        uint64        // number of cgo calls in total
+	ncgo            int32         // number of cgo calls currently in progress
+	cgoCallersUse   atomic.Uint32 // if non-zero, cgoCallers in use temporarily
+	cgoCallers      *cgoCallers   // cgo traceback if crashing in cgo call
+	park            note
+	alllink         *m // on allm
+	schedlink       muintptr
+	lockedg         guintptr
+	createstack     [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
+	lockedExt       uint32      // tracking for external LockOSThread
+	lockedInt       uint32      // tracking for internal lockOSThread
+	nextwaitm       muintptr    // next m waiting for lock
 
 	mLockProfile mLockProfile // fields relating to runtime.lock contention
 	profStack    []uintptr    // used for memory/block/mutex stack traces
@@ -1095,6 +1096,7 @@ const (
 	waitReasonTraceProcStatus    // "trace proc status"
 	waitReasonPageTraceFlush     // "page trace flush"
 	waitReasonCoroutine          // "coroutine"
+	waitReasonGCWeakToStrongWait // "GC weak to strong wait"
 )
 
 var waitReasonStrings = [...]string{
@@ -1135,6 +1137,7 @@ var waitReasonStrings = [...]string{
 	waitReasonTraceProcStatus:    "trace proc status",
 	waitReasonPageTraceFlush:     "page trace flush",
 	waitReasonCoroutine:          "coroutine",
+	waitReasonGCWeakToStrongWait: "GC weak to strong wait",
 }
 
 func (w waitReason) String() string {
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index cdf859a7..d43c6ace 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -69,7 +69,7 @@ const (
 	// to each stack below the usual guard area for OS-specific
 	// purposes like signal handling. Used on Windows, Plan 9,
 	// and iOS because they do not use a separate stack.
-	stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
+	stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
 
 	// The minimum size of stack used by Go code
 	stackMin = 2048
@@ -1330,7 +1330,7 @@ func morestackc() {
 }
 
 // startingStackSize is the amount of stack that new goroutines start with.
-// It is a power of 2, and between _FixedStack and maxstacksize, inclusive.
+// It is a power of 2, and between fixedStack and maxstacksize, inclusive.
 // startingStackSize is updated every GC by tracking the average size of
 // stacks scanned during the GC.
 var startingStackSize uint32 = fixedStack
diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go
index 195b3e1c..7c4cb550 100644
--- a/src/runtime/traceruntime.go
+++ b/src/runtime/traceruntime.go
@@ -99,24 +99,26 @@ const (
 	traceBlockDebugCall
 	traceBlockUntilGCEnds
 	traceBlockSleep
+	traceBlockGCWeakToStrongWait
 )
 
 var traceBlockReasonStrings = [...]string{
-	traceBlockGeneric:         "unspecified",
-	traceBlockForever:         "forever",
-	traceBlockNet:             "network",
-	traceBlockSelect:          "select",
-	traceBlockCondWait:        "sync.(*Cond).Wait",
-	traceBlockSync:            "sync",
-	traceBlockChanSend:        "chan send",
-	traceBlockChanRecv:        "chan receive",
-	traceBlockGCMarkAssist:    "GC mark assist wait for work",
-	traceBlockGCSweep:         "GC background sweeper wait",
-	traceBlockSystemGoroutine: "system goroutine wait",
-	traceBlockPreempted:       "preempted",
-	traceBlockDebugCall:       "wait for debug call",
-	traceBlockUntilGCEnds:     "wait until GC ends",
-	traceBlockSleep:           "sleep",
+	traceBlockGeneric:            "unspecified",
+	traceBlockForever:            "forever",
+	traceBlockNet:                "network",
+	traceBlockSelect:             "select",
+	traceBlockCondWait:           "sync.(*Cond).Wait",
+	traceBlockSync:               "sync",
+	traceBlockChanSend:           "chan send",
+	traceBlockChanRecv:           "chan receive",
+	traceBlockGCMarkAssist:       "GC mark assist wait for work",
+	traceBlockGCSweep:            "GC background sweeper wait",
+	traceBlockSystemGoroutine:    "system goroutine wait",
+	traceBlockPreempted:          "preempted",
+	traceBlockDebugCall:          "wait for debug call",
+	traceBlockUntilGCEnds:        "wait until GC ends",
+	traceBlockSleep:              "sleep",
+	traceBlockGCWeakToStrongWait: "GC weak to strong wait",
 }
 
 // traceGoStopReason is an enumeration of reasons a goroutine might yield.
diff --git a/src/syscall/dll_windows.go b/src/syscall/dll_windows.go
index b3554d34..bd82b515 100644
--- a/src/syscall/dll_windows.go
+++ b/src/syscall/dll_windows.go
@@ -42,6 +42,7 @@ func Syscall15(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a
 
 // Deprecated: Use [SyscallN] instead.
 func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2 uintptr, err Errno)
+//go:noescape
 func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno)
 func loadlibrary(filename *uint16) (handle uintptr, err Errno)
 func loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle uintptr, err Errno)
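The //go:noescape directive on the SyscallN declaration asserts to the compiler that the variadic args slice does not escape, so call sites can keep the slice's backing array on the caller's stack instead of heap-allocating it on every call; that is exactly what the new test below checks with AllocsPerRun. The directive itself is only legal on declarations without bodies, but the effect it buys is ordinary escape analysis, which this small invented demo makes visible with go build -gcflags=-m (the function names sink, escapes, and stays are hypothetical):

package main

import "fmt"

var sink []uintptr

// escapes retains its argument slice, so the backing array of each
// variadic call site must be heap-allocated.
func escapes(args ...uintptr) { sink = args }

// stays only reads the slice, so escape analysis lets the caller keep
// the backing array on its own stack.
func stays(args ...uintptr) uintptr {
	var sum uintptr
	for _, a := range args {
		sum += a
	}
	return sum
}

func main() {
	escapes(1, 2, 3)            // -m reports the []uintptr literal escaping to the heap
	fmt.Println(stays(1, 2, 3)) // -m reports that this one does not escape
}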
diff --git a/src/syscall/syscall_windows_test.go b/src/syscall/syscall_windows_test.go
index f67e8991..a6c6eff3 100644
--- a/src/syscall/syscall_windows_test.go
+++ b/src/syscall/syscall_windows_test.go
@@ -213,6 +213,51 @@ func TestGetStartupInfo(t *testing.T) {
 	}
 }
 
+func TestSyscallAllocations(t *testing.T) {
+	testenv.SkipIfOptimizationOff(t)
+
+	// Test that syscall.SyscallN arguments do not escape.
+	// The function used (in this case GetVersion) doesn't matter
+	// as long as it is always available and doesn't panic.
+	h, err := syscall.LoadLibrary("kernel32.dll")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer syscall.FreeLibrary(h)
+	proc, err := syscall.GetProcAddress(h, "GetVersion")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	testAllocs := func(t *testing.T, name string, fn func() error) {
+		t.Run(name, func(t *testing.T) {
+			n := int(testing.AllocsPerRun(10, func() {
+				if err := fn(); err != nil {
+					t.Fatalf("%s: %v", name, err)
+				}
+			}))
+			if n > 0 {
+				t.Errorf("allocs = %d, want 0", n)
+			}
+		})
+	}
+
+	testAllocs(t, "SyscallN", func() error {
+		r0, _, e1 := syscall.SyscallN(proc, 0, 0, 0)
+		if r0 == 0 {
+			return syscall.Errno(e1)
+		}
+		return nil
+	})
+	testAllocs(t, "Syscall", func() error {
+		r0, _, e1 := syscall.Syscall(proc, 3, 0, 0, 0)
+		if r0 == 0 {
+			return syscall.Errno(e1)
+		}
+		return nil
+	})
+}
+
 func FuzzUTF16FromString(f *testing.F) {
 	f.Add("hi") // ASCII
 	f.Add("â")  // latin1
diff --git a/src/time/time_test.go b/src/time/time_test.go
index 70eb6147..c12b9117 100644
--- a/src/time/time_test.go
+++ b/src/time/time_test.go
@@ -14,6 +14,7 @@ import (
 	"math/rand"
 	"os"
 	"runtime"
+	"slices"
 	"strings"
 	"sync"
 	"testing"
@@ -1084,10 +1085,15 @@ func TestLoadFixed(t *testing.T) {
 	// So GMT+1 corresponds to -3600 in the Go zone, not +3600.
 	name, offset := Now().In(loc).Zone()
 	// The zone abbreviation is "-01" since tzdata-2016g, and "GMT+1"
-	// on earlier versions; we accept both. (Issue #17276).
-	if !(name == "GMT+1" || name == "-01") || offset != -1*60*60 {
-		t.Errorf("Now().In(loc).Zone() = %q, %d, want %q or %q, %d",
-			name, offset, "GMT+1", "-01", -1*60*60)
+	// on earlier versions; we accept both. (Issue 17276.)
+	wantName := []string{"GMT+1", "-01"}
+	// The zone abbreviation may be "+01" on OpenBSD. (Issue 69840.)
+	if runtime.GOOS == "openbsd" {
+		wantName = append(wantName, "+01")
+	}
+	if !slices.Contains(wantName, name) || offset != -1*60*60 {
+		t.Errorf("Now().In(loc).Zone() = %q, %d, want %q (one of), %d",
+			name, offset, wantName, -1*60*60)
 	}
 }
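The updated assertion accepts any member of wantName because the abbreviation a platform's tzdata assigns to a POSIX-style TZ zone varies. A minimal standalone sketch of the API the test exercises, with time.FixedZone standing in for the loaded zone; note that Zone reports the offset in seconds east of UTC, which is why the POSIX-style "GMT+1" corresponds to -3600:

package main

import (
	"fmt"
	"slices"
	"time"
)

func main() {
	// abbreviation, then offset in seconds east of UTC
	loc := time.FixedZone("-01", -60*60)
	name, offset := time.Now().In(loc).Zone()
	fmt.Println(name, offset) // -01 -3600

	// The test accepts any of several abbreviations for the same zone.
	wantName := []string{"GMT+1", "-01", "+01"}
	fmt.Println(slices.Contains(wantName, name)) // true
}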