Update to go1.24.5
VERSION
@@ -1,2 +1,2 @@
-go1.24.4
-time 2025-05-29T19:37:36Z
+go1.24.5
+time 2025-07-02T21:47:15Z
@@ -224,6 +224,10 @@ use and validate the CRT parameters in the encoded private key. This behavior
 can be controlled with the `x509rsacrt` setting. Using `x509rsacrt=0` restores
 the Go 1.23 behavior.
 
+Go 1.24.5 disabled build information stamping when multiple VCS are detected due
+to concerns around VCS injection attacks. This behavior can be re-enabled with the
+setting `allowmultiplevcs=1`.
+
 ### Go 1.23
 
 Go 1.23 changed the channels created by package time to be unbuffered
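The new allowmultiplevcs knob follows the usual GODEBUG spelling: a comma-separated list of name=value pairs in the GODEBUG environment variable. A minimal standalone sketch of that spelling (plain env parsing, not from this commit; the toolchain itself uses the internal/godebug package shown later in this diff):

package main

import (
	"fmt"
	"os"
	"strings"
)

// allowMultipleVCS reports whether GODEBUG contains allowmultiplevcs=1.
func allowMultipleVCS() bool {
	for _, kv := range strings.Split(os.Getenv("GODEBUG"), ",") {
		if kv == "allowmultiplevcs=1" {
			return true
		}
	}
	return false
}

func main() {
	// e.g. GODEBUG=allowmultiplevcs=1 go build ./...
	fmt.Println("allowmultiplevcs enabled:", allowMultipleVCS())
}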
@@ -42,6 +42,8 @@ func TestASAN(t *testing.T) {
 		{src: "asan_global3_fail.go", memoryAccessError: "global-buffer-overflow", errorLocation: "asan_global3_fail.go:13"},
 		{src: "asan_global4_fail.go", memoryAccessError: "global-buffer-overflow", errorLocation: "asan_global4_fail.go:21"},
 		{src: "asan_global5.go"},
+		{src: "asan_global_asm"},
+		{src: "asan_global_asm2_fail", memoryAccessError: "global-buffer-overflow", errorLocation: "main.go:17"},
 		{src: "arena_fail.go", memoryAccessError: "use-after-poison", errorLocation: "arena_fail.go:26", experiments: []string{"arenas"}},
 	}
 	for _, tc := range cases {
@@ -536,7 +536,7 @@ func (c *config) checkRuntime() (skip bool, err error) {
 
 // srcPath returns the path to the given file relative to this test's source tree.
 func srcPath(path string) string {
-	return filepath.Join("testdata", path)
+	return "./testdata/" + path
 }
 
 // A tempDir manages a temporary directory within a test.
src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/asm.s (new file)
@@ -0,0 +1,8 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+DATA ·x(SB)/8, $123
+GLOBL ·x(SB), NOPTR, $8
src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm/main.go (new file)
@@ -0,0 +1,11 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+var x uint64
+
+func main() {
+	println(x)
+}
src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/asm.s (new file)
@@ -0,0 +1,8 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+DATA ·x(SB)/8, $123
+GLOBL ·x(SB), NOPTR, $8
src/cmd/cgo/internal/testsanitizers/testdata/asan_global_asm2_fail/main.go (new file)
@@ -0,0 +1,20 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "unsafe"
+
+var x uint64
+
+func main() {
+	bar(&x)
+}
+
+func bar(a *uint64) {
+	p := (*uint64)(unsafe.Add(unsafe.Pointer(a), 1*unsafe.Sizeof(uint64(1))))
+	if *p == 10 { // BOOM
+		println("its value is 10")
+	}
+}
@@ -886,8 +886,8 @@ func init() {
 			inputs:   []regMask{buildReg("DI")},
 			clobbers: buildReg("DI"),
 		},
-		faultOnNilArg0: true,
-		unsafePoint:    true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+		//faultOnNilArg0: true, // Note: removed for 73748. TODO: reenable at some point
+		unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
 	},
 
 	// arg0 = address of memory to zero
@@ -924,10 +924,10 @@ func init() {
 			inputs:   []regMask{buildReg("DI"), buildReg("SI")},
 			clobbers: buildReg("DI SI X0"), // uses X0 as a temporary
 		},
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		unsafePoint:    true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+		clobberFlags: true,
+		//faultOnNilArg0: true, // Note: removed for 73748. TODO: reenable at some point
+		//faultOnNilArg1: true,
+		unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
 	},
 
 	// arg0 = destination pointer
@@ -536,8 +536,8 @@ func init() {
 			inputs:   []regMask{buildReg("R20")},
 			clobbers: buildReg("R16 R17 R20 R30"),
 		},
-		faultOnNilArg0: true,
-		unsafePoint:    true, // FP maintenance around DUFFZERO can be clobbered by interrupts
+		//faultOnNilArg0: true, // Note: removed for 73748. TODO: reenable at some point
+		unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts
 	},
 
 	// large zeroing
@@ -577,9 +577,9 @@ func init() {
 			inputs:   []regMask{buildReg("R21"), buildReg("R20")},
 			clobbers: buildReg("R16 R17 R20 R21 R26 R30"),
 		},
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		unsafePoint:    true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+		//faultOnNilArg0: true, // Note: removed for 73748. TODO: reenable at some point
+		//faultOnNilArg1: true,
+		unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
 	},
 
 	// large move
@@ -13777,11 +13777,10 @@ var opcodeTable = [...]opInfo{
 		},
 	},
 	{
-		name:           "DUFFZERO",
-		auxType:        auxInt64,
-		argLen:         2,
-		faultOnNilArg0: true,
-		unsafePoint:    true,
+		name:        "DUFFZERO",
+		auxType:     auxInt64,
+		argLen:      2,
+		unsafePoint: true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 128}, // DI
@@ -13851,13 +13850,11 @@ var opcodeTable = [...]opInfo{
 		},
 	},
 	{
-		name:           "DUFFCOPY",
-		auxType:        auxInt64,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		unsafePoint:    true,
+		name:         "DUFFCOPY",
+		auxType:      auxInt64,
+		argLen:       3,
+		clobberFlags: true,
+		unsafePoint:  true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 128}, // DI
@@ -22970,11 +22967,10 @@ var opcodeTable = [...]opInfo{
 		},
 	},
 	{
-		name:           "DUFFZERO",
-		auxType:        auxInt64,
-		argLen:         2,
-		faultOnNilArg0: true,
-		unsafePoint:    true,
+		name:        "DUFFZERO",
+		auxType:     auxInt64,
+		argLen:      2,
+		unsafePoint: true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 1048576}, // R20
@@ -22996,12 +22992,10 @@ var opcodeTable = [...]opInfo{
 		},
 	},
 	{
-		name:           "DUFFCOPY",
-		auxType:        auxInt64,
-		argLen:         3,
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		unsafePoint:    true,
+		name:        "DUFFCOPY",
+		auxType:     auxInt64,
+		argLen:      3,
+		unsafePoint: true,
 		reg: regInfo{
 			inputs: []inputInfo{
 				{0, 2097152}, // R21
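These opcodeTable entries drop faultOnNilArg for DUFFZERO and DUFFCOPY, per the "removed for 73748" notes in the generator files above. For intuition, faultOnNilArg lets the compiler rely on the op's own memory access to raise the nil-pointer fault instead of emitting an explicit nil check; the fixedbugs tests added at the end of this commit exercise exactly that shape. A standalone illustration of the pattern (not from this commit; whether DUFFZERO is actually emitted depends on the target and struct size):

package main

import "fmt"

type T struct{ a [16]int }

//go:noinline
func zero(x *T) {
	*x = T{} // large enough that the compiler typically emits a Duff's-device zeroing sequence
}

func main() {
	defer func() {
		// The hardware fault inside the zeroing code surfaces as an
		// ordinary nil-pointer panic, which we can recover from.
		fmt.Println("recovered:", recover())
	}()
	zero(nil)
}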
@@ -1470,6 +1470,11 @@ func GetPPC64Shiftme(auxint int64) int64 {
 // operation. Masks can also extend from the msb and wrap to
 // the lsb too. That is, the valid masks are 32 bit strings
 // of the form: 0..01..10..0 or 1..10..01..1 or 1...1
+//
+// Note: This ignores the upper 32 bits of the input. When a
+// zero extended result is desired (e.g. a 64 bit result), the
+// user must verify the upper 32 bits are 0 and the mask is
+// contiguous (that is, non-wrapping).
 func isPPC64WordRotateMask(v64 int64) bool {
 	// Isolate rightmost 1 (if none 0) and add.
 	v := uint32(v64)
@@ -1480,6 +1485,16 @@ func isPPC64WordRotateMask(v64 int64) bool {
 	return (v&vp == 0 || vn&vpn == 0) && v != 0
 }
 
+// Test if this mask is a valid, contiguous bitmask which can be
+// represented by a RLWNM mask and also clears the upper 32 bits
+// of the register.
+func isPPC64WordRotateMaskNonWrapping(v64 int64) bool {
+	// Isolate rightmost 1 (if none 0) and add.
+	v := uint32(v64)
+	vp := (v & -v) + v
+	return (v&vp == 0) && v != 0 && uint64(uint32(v64)) == uint64(v64)
+}
+
 // Compress mask and shift into single value of the form
 // me | mb<<8 | rotate<<16 | nbits<<24 where me and mb can
 // be used to regenerate the input mask.
@@ -1589,7 +1604,7 @@ func mergePPC64AndSrdi(m, s int64) int64 {
 	if rv&uint64(mask) != 0 {
 		return 0
 	}
-	if !isPPC64WordRotateMask(mask) {
+	if !isPPC64WordRotateMaskNonWrapping(mask) {
 		return 0
 	}
 	return encodePPC64RotateMask((32-s)&31, mask, 32)
@@ -1604,7 +1619,7 @@ func mergePPC64AndSldi(m, s int64) int64 {
 	if rv&uint64(mask) != 0 {
 		return 0
 	}
-	if !isPPC64WordRotateMask(mask) {
+	if !isPPC64WordRotateMaskNonWrapping(mask) {
 		return 0
 	}
 	return encodePPC64RotateMask(s&31, mask, 32)
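The contiguity test above leans on a classic bit trick: for a value with a single run of ones, v & -v isolates the lowest set bit, and adding it to v carries past the top of the run, so the sum shares no bits with v. A self-contained sketch of the non-wrapping check (the helper name is ours, not the compiler's):

package main

import "fmt"

// isContiguousMask32 reports whether v is a single non-wrapping run of
// ones, e.g. 0x00FF0000. (v & -v) isolates the lowest set bit; adding it
// to v carries past the top of the run, so v&vp == 0 exactly when the
// ones are contiguous.
func isContiguousMask32(v uint32) bool {
	vp := (v & -v) + v
	return v&vp == 0 && v != 0
}

func main() {
	fmt.Println(isContiguousMask32(0x00FF0000)) // true: one run of ones
	fmt.Println(isContiguousMask32(0xFF0000FF)) // false: the run wraps around
	fmt.Println(isContiguousMask32(0x0F0F0000)) // false: two separate runs
}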
@@ -114,7 +114,11 @@ func Init() {
 		fsys.Bind(Dir(), filepath.Join(cfg.GOROOT, "src/crypto/internal/fips140"))
 	}
 
-	if cfg.Experiment.BoringCrypto && Enabled() {
+	// ExperimentErr != nil if GOEXPERIMENT failed to parse. Typically
+	// cmd/go main will exit in this case, but it is allowed during
+	// toolchain selection, as the GOEXPERIMENT may be valid for the
+	// selected toolchain version.
+	if cfg.ExperimentErr == nil && cfg.Experiment.BoringCrypto && Enabled() {
 		base.Fatalf("go: cannot use GOFIPS140 with GOEXPERIMENT=boringcrypto")
 	}
 }
@@ -2534,7 +2534,6 @@ func (p *Package) setBuildInfo(ctx context.Context, autoVCS bool) {
 	var repoDir string
 	var vcsCmd *vcs.Cmd
 	var err error
-	const allowNesting = true
 
 	wantVCS := false
 	switch cfg.BuildBuildvcs {
@@ -2554,7 +2553,7 @@ func (p *Package) setBuildInfo(ctx context.Context, autoVCS bool) {
 		// (so the bootstrap toolchain packages don't even appear to be in GOROOT).
 		goto omitVCS
 	}
-	repoDir, vcsCmd, err = vcs.FromDir(base.Cwd(), "", allowNesting)
+	repoDir, vcsCmd, err = vcs.FromDir(base.Cwd(), "")
 	if err != nil && !errors.Is(err, os.ErrNotExist) {
 		setVCSError(err)
 		return
@@ -2577,10 +2576,11 @@ func (p *Package) setBuildInfo(ctx context.Context, autoVCS bool) {
 	}
 	if repoDir != "" && vcsCmd.Status != nil {
 		// Check that the current directory, package, and module are in the same
-		// repository. vcs.FromDir allows nested Git repositories, but nesting
-		// is not allowed for other VCS tools. The current directory may be outside
-		// p.Module.Dir when a workspace is used.
-		pkgRepoDir, _, err := vcs.FromDir(p.Dir, "", allowNesting)
+		// repository. vcs.FromDir disallows nested VCS and multiple VCS in the
+		// same repository, unless the GODEBUG allowmultiplevcs is set. The
+		// current directory may be outside p.Module.Dir when a workspace is
+		// used.
+		pkgRepoDir, _, err := vcs.FromDir(p.Dir, "")
 		if err != nil {
 			setVCSError(err)
 			return
@@ -2592,7 +2592,7 @@ func (p *Package) setBuildInfo(ctx context.Context, autoVCS bool) {
 		}
 		goto omitVCS
 	}
-	modRepoDir, _, err := vcs.FromDir(p.Module.Dir, "", allowNesting)
+	modRepoDir, _, err := vcs.FromDir(p.Module.Dir, "")
 	if err != nil {
 		setVCSError(err)
 		return
@@ -230,7 +230,7 @@ func LookupLocal(ctx context.Context, path string) Repo {
 
 	return lookupLocalCache.Do(path, func() Repo {
 		return newCachingRepo(ctx, path, func(ctx context.Context) (Repo, error) {
-			repoDir, vcsCmd, err := vcs.FromDir(path, "", true)
+			repoDir, vcsCmd, err := vcs.FromDir(path, "")
 			if err != nil {
 				return nil, err
 			}
@@ -8,6 +8,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"internal/godebug"
 	"internal/lazyregexp"
 	"internal/singleflight"
 	"io/fs"
@@ -839,11 +840,13 @@ type vcsPath struct {
 	schemelessRepo bool // if true, the repo pattern lacks a scheme
 }
 
+var allowmultiplevcs = godebug.New("allowmultiplevcs")
+
 // FromDir inspects dir and its parents to determine the
 // version control system and code repository to use.
 // If no repository is found, FromDir returns an error
 // equivalent to os.ErrNotExist.
-func FromDir(dir, srcRoot string, allowNesting bool) (repoDir string, vcsCmd *Cmd, err error) {
+func FromDir(dir, srcRoot string) (repoDir string, vcsCmd *Cmd, err error) {
 	// Clean and double-check that dir is in (a subdirectory of) srcRoot.
 	dir = filepath.Clean(dir)
 	if srcRoot != "" {
@@ -857,21 +860,28 @@ func FromDir(dir, srcRoot string, allowNesting bool) (repoDir string, vcsCmd *Cm
 	for len(dir) > len(srcRoot) {
 		for _, vcs := range vcsList {
 			if isVCSRoot(dir, vcs.RootNames) {
-				// Record first VCS we find.
-				// If allowNesting is false (as it is in GOPATH), keep looking for
-				// repositories in parent directories and report an error if one is
-				// found to mitigate VCS injection attacks.
 				if vcsCmd == nil {
+					// Record first VCS we find.
 					vcsCmd = vcs
 					repoDir = dir
-					if allowNesting {
+					if allowmultiplevcs.Value() == "1" {
+						allowmultiplevcs.IncNonDefault()
 						return repoDir, vcsCmd, nil
 					}
+					// If allowmultiplevcs is not set, keep looking for
+					// repositories in current and parent directories and report
+					// an error if one is found to mitigate VCS injection
+					// attacks.
 					continue
 				}
-				// Otherwise, we have one VCS inside a different VCS.
-				return "", nil, fmt.Errorf("directory %q uses %s, but parent %q uses %s",
-					repoDir, vcsCmd.Cmd, dir, vcs.Cmd)
+				if vcsCmd == vcsGit && vcs == vcsGit {
+					// Nested Git is allowed, as this is how things like
+					// submodules work. Git explicitly protects against
+					// injection against itself.
+					continue
+				}
+				return "", nil, fmt.Errorf("multiple VCS detected: %s in %q, and %s in %q",
+					vcsCmd.Cmd, repoDir, vcs.Cmd, dir)
 			}
 		}
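At its core the mitigation is an upward directory walk that records the first VCS root it finds and keeps scanning parent directories for a second, different one. A simplified, self-contained sketch of that walk (a toy helper, not the cmd/go implementation; it omits the srcRoot bound and the GODEBUG escape hatch, and like the real code it lets same-VCS nesting pass):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// vcsDirs maps a VCS name to its metadata directory name.
var vcsDirs = map[string]string{"git": ".git", "hg": ".hg", "bzr": ".bzr", "svn": ".svn"}

// findVCS walks from dir toward the filesystem root, recording the first
// VCS root it sees and failing if metadata for a second, different VCS
// appears in a parent directory.
func findVCS(dir string) (root, name string, err error) {
	for {
		for vcs, meta := range vcsDirs {
			if _, statErr := os.Stat(filepath.Join(dir, meta)); statErr == nil {
				switch {
				case name == "":
					name, root = vcs, dir // first VCS found; keep scanning parents
				case name != vcs:
					return "", "", fmt.Errorf("multiple VCS detected: %s in %q, and %s in %q", name, root, vcs, dir)
				}
			}
		}
		parent := filepath.Dir(dir)
		if parent == dir { // reached the root
			return root, name, nil
		}
		dir = parent
	}
}

func main() {
	wd, _ := os.Getwd()
	fmt.Println(findVCS(wd))
}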
@@ -239,7 +239,7 @@ func TestFromDir(t *testing.T) {
 		}
 
 		wantRepoDir := filepath.Dir(dir)
-		gotRepoDir, gotVCS, err := FromDir(dir, tempDir, false)
+		gotRepoDir, gotVCS, err := FromDir(dir, tempDir)
 		if err != nil {
 			t.Errorf("FromDir(%q, %q): %v", dir, tempDir, err)
 			continue
src/cmd/go/testdata/script/test_multivcs.txt (new file)
@@ -0,0 +1,54 @@
+# To avoid VCS injection attacks, we should not accept multiple different VCS metadata
+# folders within a single module (either in the same directory, or nested in different
+# directories.)
+#
+# This behavior should be disabled by setting the allowmultiplevcs GODEBUG.
+
+[short] skip
+[!git] skip
+
+cd samedir
+
+exec git init .
+
+# Without explicitly requesting buildvcs, the go command should silently continue
+# without determining the correct VCS.
+go test -c -o $devnull .
+
+# If buildvcs is explicitly requested, we expect the go command to fail
+! go test -buildvcs -c -o $devnull .
+stderr '^error obtaining VCS status: multiple VCS detected:'
+
+env GODEBUG=allowmultiplevcs=1
+go test -buildvcs -c -o $devnull .
+
+env GODEBUG=
+cd ../nested
+exec git init .
+# cd a
+go test -c -o $devnull ./a
+! go test -buildvcs -c -o $devnull ./a
+stderr '^error obtaining VCS status: multiple VCS detected:'
+# allowmultiplevcs doesn't disable the check that the current directory, package, and
+# module are in the same repository.
+env GODEBUG=allowmultiplevcs=1
+! go test -buildvcs -c -o $devnull ./a
+stderr '^error obtaining VCS status: main package is in repository'
+
+-- samedir/go.mod --
+module example
+
+go 1.18
+-- samedir/example.go --
+package main
+-- samedir/.bzr/test --
+hello
+
+-- nested/go.mod --
+module example
+
+go 1.18
+-- nested/a/example.go --
+package main
+-- nested/a/.bzr/test --
+hello
@@ -9,25 +9,35 @@ cd root
 go mod init example.com/root
 exec git init
 
-# Nesting repositories in parent directories are ignored, as the current
-# directory main package, and containing main module are in the same repository.
-# This is an error in GOPATH mode (to prevent VCS injection), but for modules,
-# we assume users have control over repositories they've checked out.
+# Nesting repositories in parent directories are an error, to prevent VCS injection.
+# This can be disabled with the allowmultiplevcs GODEBUG.
 mkdir hgsub
 cd hgsub
 exec hg init
 cp ../../main.go main.go
+! go build
+stderr '^error obtaining VCS status: multiple VCS detected: hg in ".*hgsub", and git in ".*root"$'
+stderr '^\tUse -buildvcs=false to disable VCS stamping.$'
+env GODEBUG=allowmultiplevcs=1
 ! go build
 stderr '^error obtaining VCS status: main module is in repository ".*root" but current directory is in repository ".*hgsub"$'
 stderr '^\tUse -buildvcs=false to disable VCS stamping.$'
 go build -buildvcs=false
+env GODEBUG=
 go mod init example.com/root/hgsub
+! go build
+stderr '^error obtaining VCS status: multiple VCS detected: hg in ".*hgsub", and git in ".*root"$'
+stderr '^\tUse -buildvcs=false to disable VCS stamping.$'
+env GODEBUG=allowmultiplevcs=1
 go build
+env GODEBUG=
 cd ..
 
 # It's an error to build a package from a nested Git repository if the package
 # is in a separate repository from the current directory or from the module
-# root directory.
+# root directory. Otherwise nested Git repositories are allowed, as this is
+# how Git implements submodules (and protects against Git based VCS injection.)
 mkdir gitsub
 cd gitsub
 exec git init
@@ -253,6 +253,12 @@ type Loader struct {
 
 	WasmExports []Sym
 
+	// sizeFixups records symbols that we need to fix up the size
+	// after loading. It is very rarely needed, only for a DATA symbol
+	// and a BSS symbol with the same name, and the BSS symbol has
+	// larger size.
+	sizeFixups []symAndSize
+
 	flags uint32
 
 	strictDupMsgs int // number of strict-dup warning/errors, when FlagStrictDups is enabled
@@ -469,18 +475,17 @@ func (st *loadState) addSym(name string, ver int, r *oReader, li uint32, kind in
 	// In summary, the "overwrite" variable and the final result are
 	//
 	// new sym       old sym       result
-	// ---------------------------------------------
+	// -------------------------------------------------------
 	// TEXT          BSS           new wins
 	// DATA          DATA          ERROR
 	// DATA lg/eq    BSS sm/eq     new wins
-	// DATA small    BSS large     ERROR
-	// BSS large     DATA small    ERROR
+	// DATA small    BSS large     merge: new with larger size
+	// BSS large     DATA small    merge: old with larger size
 	// BSS large     BSS small     new wins
 	// BSS sm/eq     D/B lg/eq     old wins
 	// BSS           TEXT          old wins
 	oldtyp := sym.AbiSymKindToSymKind[objabi.SymKind(oldsym.Type())]
 	newtyp := sym.AbiSymKindToSymKind[objabi.SymKind(osym.Type())]
 	oldIsText := oldtyp.IsText()
 	newIsText := newtyp.IsText()
 	oldHasContent := oldr.DataSize(oldli) != 0
 	newHasContent := r.DataSize(li) != 0
@@ -488,12 +493,28 @@ func (st *loadState) addSym(name string, ver int, r *oReader, li uint32, kind in
 	newIsBSS := newtyp.IsData() && !newHasContent
 	switch {
 	case newIsText && oldIsBSS,
-		newHasContent && oldIsBSS && sz >= oldsz,
+		newHasContent && oldIsBSS,
 		newIsBSS && oldIsBSS && sz > oldsz:
 		// new symbol overwrites old symbol.
 		l.objSyms[oldi] = objSym{r.objidx, li}
-	case newIsBSS && (oldsz >= sz || oldIsText):
+		if oldsz > sz {
+			// If the BSS symbol has a larger size, expand the data
+			// symbol's size so access from the BSS side cannot overrun.
+			// It is hard to modify the symbol size until all Go objects
+			// (potentially read-only) are loaded, so we record it in
+			// a fixup table and apply them later. This is very rare.
+			// One case is a global variable with a Go declaration and an
+			// assembly definition, which typically have the same size,
+			// but in ASAN mode the Go declaration has a larger size due
+			// to the inserted red zone.
+			l.sizeFixups = append(l.sizeFixups, symAndSize{oldi, uint32(oldsz)})
+		}
+	case newIsBSS:
 		// old win, just ignore the new symbol.
+		if sz > oldsz {
+			// See the comment above for sizeFixups.
+			l.sizeFixups = append(l.sizeFixups, symAndSize{oldi, uint32(sz)})
+		}
 	default:
 		log.Fatalf("duplicated definition of symbol %s, from %s (type %s size %d) and %s (type %s size %d)", name, r.unit.Lib.Pkg, newtyp, sz, oldr.unit.Lib.Pkg, oldtyp, oldsz)
 	}
@@ -2285,6 +2306,10 @@ func (l *Loader) LoadSyms(arch *sys.Arch) {
 		st.preloadSyms(r, hashedDef)
 		st.preloadSyms(r, nonPkgDef)
 	}
+	for _, sf := range l.sizeFixups {
+		pp := l.cloneToExternal(sf.sym)
+		pp.size = int64(sf.size)
+	}
 	for _, vr := range st.linknameVarRefs {
 		l.checkLinkname(vr.pkg, vr.name, vr.sym)
 	}
@@ -2490,7 +2515,7 @@ func topLevelSym(sname string, skind sym.SymKind) bool {
 // a symbol originally discovered as part of an object file, it's
 // easier to do this if we make the updates to an external symbol
 // payload.
-func (l *Loader) cloneToExternal(symIdx Sym) {
+func (l *Loader) cloneToExternal(symIdx Sym) *extSymPayload {
 	if l.IsExternal(symIdx) {
 		panic("sym is already external, no need for clone")
 	}
@@ -2542,6 +2567,8 @@ func (l *Loader) cloneToExternal(symIdx Sym) {
 	// Some attributes were encoded in the object file. Copy them over.
 	l.SetAttrDuplicateOK(symIdx, r.Sym(li).Dupok())
 	l.SetAttrShared(symIdx, r.Shared())
+
+	return pp
 }
 
 // Copy the payload of symbol src to dst. Both src and dst must be external
@@ -46,7 +46,8 @@ func TestAll(t *testing.T) {
 		if info.Old != "" && info.Changed == 0 {
 			t.Errorf("Name=%s has Old, missing Changed", info.Name)
 		}
-		if !strings.Contains(doc, "`"+info.Name+"`") {
+		if !strings.Contains(doc, "`"+info.Name+"`") &&
+			!strings.Contains(doc, "`"+info.Name+"=") {
 			t.Errorf("Name=%s not documented in doc/godebug.md", info.Name)
 		}
 		if !info.Opaque && !incs[info.Name] {
@@ -25,6 +25,7 @@ type Info struct {
 // Note: After adding entries to this table, update the list in doc/godebug.md as well.
 // (Otherwise the test in this package will fail.)
 var All = []Info{
+	{Name: "allowmultiplevcs", Package: "cmd/go"},
 	{Name: "asynctimerchan", Package: "time", Changed: 23, Old: "1"},
 	{Name: "dataindependenttiming", Package: "crypto/subtle", Opaque: true},
 	{Name: "execerrdot", Package: "os/exec"},
@@ -1049,10 +1049,18 @@ func (h *mheap) allocUserArenaChunk() *mspan {
 	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
 	s.isUserArenaChunk = true
 	s.elemsize -= userArenaChunkReserveBytes()
-	s.limit = s.base() + s.elemsize
 	s.freeindex = 1
 	s.allocCount = 1
 
+	// Adjust s.limit down to the object-containing part of the span.
+	//
+	// This is just to create a slightly tighter bound on the limit.
+	// It's totally OK if the garbage collector, in particular
+	// conservative scanning, temporarily observes an inflated
+	// limit. It will simply mark the whole chunk or just skip it
+	// since we're in the mark phase anyway.
+	s.limit = s.base() + s.elemsize
+
 	// Adjust size to include redzone.
 	if asanenabled {
 		s.elemsize -= redZoneSize(s.elemsize)
@@ -252,6 +252,14 @@ func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
 	// Put the large span in the mcentral swept list so that it's
 	// visible to the background sweeper.
 	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
+
+	// Adjust s.limit down to the object-containing part of the span.
+	//
+	// This is just to create a slightly tighter bound on the limit.
+	// It's totally OK if the garbage collector, in particular
+	// conservative scanning, temporarily observes an inflated
+	// limit. It will simply mark the whole object or just skip it
+	// since we're in the mark phase anyway.
+	s.limit = s.base() + size
 	s.initHeapBits()
 	return s
@@ -249,17 +249,10 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 // grow allocates a new empty span from the heap and initializes it for c's size class.
 func (c *mcentral) grow() *mspan {
 	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
-	size := uintptr(class_to_size[c.spanclass.sizeclass()])
 
 	s := mheap_.alloc(npages, c.spanclass)
 	if s == nil {
 		return nil
 	}
-
-	// Use division by multiplication and shifts to quickly compute:
-	// n := (npages << _PageShift) / size
-	n := s.divideByElemSize(npages << _PageShift)
-	s.limit = s.base() + size*n
 	s.initHeapBits()
 	return s
 }
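The removed s.limit = s.base() + size*n line is the arithmetic that bounds a span to its object-containing prefix; it now lives in the mheap.initSpan hunk later in this diff. The tail waste it excludes is easy to see with a concrete, hypothetical size class:

package main

import "fmt"

func main() {
	const pageSize = 8192 // runtime page size
	const elemSize = 48   // a hypothetical small size class
	nelems := pageSize / elemSize
	used := nelems * elemSize
	fmt.Printf("%d objects, %d bytes inside s.limit, %d tail bytes beyond it\n",
		nelems, used, pageSize-used)
	// Output: 170 objects, 8160 bytes inside s.limit, 32 tail bytes beyond it
}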
@@ -231,6 +231,7 @@ func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
 		memFree(unsafe.Pointer(end), endLen)
 	}
+	memCheck()
 	unlock(&memlock)
 	return unsafe.Pointer(pAligned), size
 }
@@ -230,6 +230,11 @@ Below is the full list of supported metrics, ordered lexicographically.
 /gc/stack/starting-size:bytes
 	The stack size of new goroutines.
 
+/godebug/non-default-behavior/allowmultiplevcs:events
+	The number of non-default behaviors executed by the cmd/go
+	package due to a non-default GODEBUG=allowmultiplevcs=...
+	setting.
+
 /godebug/non-default-behavior/asynctimerchan:events
 	The number of non-default behaviors executed by the time package
 	due to a non-default GODEBUG=asynctimerchan=... setting.
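Per-GODEBUG event counters like this one are read through the runtime/metrics API; a sample whose kind is not KindUint64 means the metric is not registered in the current binary (allowmultiplevcs, per the table above, belongs to cmd/go, so an ordinary program will typically not see it). A small reading sketch:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	const name = "/godebug/non-default-behavior/allowmultiplevcs:events"
	samples := []metrics.Sample{{Name: name}}
	metrics.Read(samples)
	if samples[0].Value.Kind() == metrics.KindUint64 {
		fmt.Println(name, "=", samples[0].Value.Uint64())
	} else {
		fmt.Println(name, "is not registered in this binary")
	}
}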
@@ -1030,7 +1030,7 @@ func gcMarkTermination(stw worldStop) {
 	// N.B. The execution tracer is not aware of this status
 	// transition and handles it specially based on the
 	// wait reason.
-	casGToWaitingForGC(curgp, _Grunning, waitReasonGarbageCollection)
+	casGToWaitingForSuspendG(curgp, _Grunning, waitReasonGarbageCollection)
 
 	// Run gc on the g0 stack. We do this so that the g stack
 	// we're currently running on will no longer change. Cuts
@@ -1482,7 +1482,8 @@ func gcBgMarkWorker(ready chan struct{}) {
 
 		systemstack(func() {
 			// Mark our goroutine preemptible so its stack
-			// can be scanned. This lets two mark workers
+			// can be scanned or observed by the execution
+			// tracer. This, for example, lets two mark workers
 			// scan each other (otherwise, they would
 			// deadlock). We must not modify anything on
 			// the G stack. However, stack shrinking is
@@ -1492,7 +1493,7 @@ func gcBgMarkWorker(ready chan struct{}) {
 			// N.B. The execution tracer is not aware of this status
 			// transition and handles it specially based on the
 			// wait reason.
-			casGToWaitingForGC(gp, _Grunning, waitReasonGCWorkerActive)
+			casGToWaitingForSuspendG(gp, _Grunning, waitReasonGCWorkerActive)
 			switch pp.gcMarkWorkerMode {
 			default:
 				throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
@@ -219,7 +219,7 @@ func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 {
 		userG := getg().m.curg
 		selfScan := gp == userG && readgstatus(userG) == _Grunning
 		if selfScan {
-			casGToWaitingForGC(userG, _Grunning, waitReasonGarbageCollectionScan)
+			casGToWaitingForSuspendG(userG, _Grunning, waitReasonGarbageCollectionScan)
 		}
 
 		// TODO: suspendG blocks (and spins) until gp
@@ -662,7 +662,7 @@ func gcAssistAlloc1(gp *g, scanWork int64) {
 	}
 
 	// gcDrainN requires the caller to be preemptible.
-	casGToWaitingForGC(gp, _Grunning, waitReasonGCAssistMarking)
+	casGToWaitingForSuspendG(gp, _Grunning, waitReasonGCAssistMarking)
 
 	// drain own cached work first in the hopes that it
 	// will be more cache friendly.
@@ -1398,7 +1398,6 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
 	if typ.manual() {
 		s.manualFreeList = 0
 		s.nelems = 0
-		s.limit = s.base() + s.npages*pageSize
 		s.state.set(mSpanManual)
 	} else {
 		// We must set span properties before the span is published anywhere
@@ -1426,6 +1425,9 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
 		s.gcmarkBits = newMarkBits(uintptr(s.nelems))
 		s.allocBits = newAllocBits(uintptr(s.nelems))
 
+		// Adjust s.limit down to the object-containing part of the span.
+		s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems)
+
 		// It's safe to access h.sweepgen without the heap lock because it's
 		// only ever updated with the world stopped and we run on the
 		// systemstack which blocks a STW transition.
@@ -1709,6 +1711,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
 	span.list = nil
 	span.startAddr = base
 	span.npages = npages
+	span.limit = base + npages*pageSize // see go.dev/issue/74288; adjusted later for heap spans
 	span.allocCount = 0
 	span.spanclass = 0
 	span.elemsize = 0
@@ -1503,11 +1503,6 @@ func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) {
 		// so here we check _Gdead first.
 		return
 	}
-	if isSystemGoroutine(gp1, true) {
-		// System goroutines should not appear in the profile. (The finalizer
-		// goroutine is marked as "already profiled".)
-		return
-	}
 
 	for {
 		prev := gp1.goroutineProfiled.Load()
@@ -1545,6 +1540,17 @@ func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) {
 // stack), or from the scheduler in preparation to execute gp1 (running on the
 // system stack).
 func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
+	if isSystemGoroutine(gp1, false) {
+		// System goroutines should not appear in the profile.
+		// Check this here and not in tryRecordGoroutineProfile because isSystemGoroutine
+		// may change on a goroutine while it is executing, so while the scheduler might
+		// see a system goroutine, goroutineProfileWithLabelsConcurrent might not, and
+		// this inconsistency could cause invariants to be violated, such as trying to
+		// record the stack of a running goroutine below. In short, we still want system
+		// goroutines to participate in the same state machine on gp1.goroutineProfiled as
+		// everything else, we just don't record the stack in the profile.
+		return
+	}
 	if readgstatus(gp1) == _Grunning {
 		print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
 		throw("cannot read stack of running goroutine")
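The profile being guarded here is the ordinary "goroutine" profile, reachable from user code via runtime/pprof; the new test below drives it exactly this way. A minimal usage sketch:

package main

import (
	"os"
	"runtime/pprof"
)

func main() {
	// Write the goroutine profile in its debug text form. System
	// goroutines (such as the finalizer goroutine while it runs
	// runtime internals) are excluded from the recorded stacks.
	pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
}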
@@ -1808,6 +1808,45 @@ func TestGoroutineProfileCoro(t *testing.T) {
 	goroutineProf.WriteTo(io.Discard, 1)
 }
 
+// This test tries to provoke a situation wherein the finalizer goroutine is
+// erroneously inspected by the goroutine profiler in such a way that could
+// cause a crash. See go.dev/issue/74090.
+func TestGoroutineProfileIssue74090(t *testing.T) {
+	testenv.MustHaveParallelism(t)
+
+	goroutineProf := Lookup("goroutine")
+
+	// T is a pointer type so it won't be allocated by the tiny
+	// allocator, which can lead to its finalizer not being called
+	// during this test.
+	type T *byte
+	for range 10 {
+		// We use finalizers for this test because finalizers transition between
+		// system and user goroutine on each call, since there's substantially
+		// more work to do to set up a finalizer call. Cleanups, on the other hand,
+		// transition once for a whole batch, and so are less likely to trigger
+		// the failure. Under stress testing conditions this test fails approximately
+		// 5 times every 1000 executions on a 64 core machine without the appropriate
+		// fix, which is not ideal but if this test crashes at all, it's a clear
+		// signal that something is broken.
+		var objs []*T
+		for range 10000 {
+			obj := new(T)
+			runtime.SetFinalizer(obj, func(_ interface{}) {})
+			objs = append(objs, obj)
+		}
+		objs = nil
+
+		// Queue up all the finalizers.
+		runtime.GC()
+
+		// Try to run a goroutine profile concurrently with finalizer execution
+		// to trigger the bug.
+		var w strings.Builder
+		goroutineProf.WriteTo(&w, 1)
+	}
+}
+
 func BenchmarkGoroutine(b *testing.B) {
 	withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) {
 		return func(b *testing.B) {
@@ -1306,13 +1306,13 @@ func casGToWaiting(gp *g, old uint32, reason waitReason) {
 	casgstatus(gp, old, _Gwaiting)
 }
 
-// casGToWaitingForGC transitions gp from old to _Gwaiting, and sets the wait reason.
-// The wait reason must be a valid isWaitingForGC wait reason.
+// casGToWaitingForSuspendG transitions gp from old to _Gwaiting, and sets the wait reason.
+// The wait reason must be a valid isWaitingForSuspendG wait reason.
 //
 // Use this over casgstatus when possible to ensure that a waitreason is set.
-func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
-	if !reason.isWaitingForGC() {
-		throw("casGToWaitingForGC with non-isWaitingForGC wait reason")
+func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
+	if !reason.isWaitingForSuspendG() {
+		throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
 	}
 	casGToWaiting(gp, old, reason)
 }
@@ -1446,23 +1446,7 @@ func stopTheWorld(reason stwReason) worldStop {
 	gp := getg()
 	gp.m.preemptoff = reason.String()
 	systemstack(func() {
-		// Mark the goroutine which called stopTheWorld preemptible so its
-		// stack may be scanned.
-		// This lets a mark worker scan us while we try to stop the world
-		// since otherwise we could get in a mutual preemption deadlock.
-		// We must not modify anything on the G stack because a stack shrink
-		// may occur. A stack shrink is otherwise OK though because in order
-		// to return from this function (and to leave the system stack) we
-		// must have preempted all goroutines, including any attempting
-		// to scan our stack, in which case, any stack shrinking will
-		// have already completed by the time we exit.
-		//
-		// N.B. The execution tracer is not aware of this status
-		// transition and handles it specially based on the
-		// wait reason.
-		casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld)
 		stopTheWorldContext = stopTheWorldWithSema(reason) // avoid write to stack
-		casgstatus(gp, _Gwaiting, _Grunning)
 	})
 	return stopTheWorldContext
 }
@@ -1551,7 +1535,30 @@ var gcsema uint32 = 1
 //
 // Returns the STW context. When starting the world, this context must be
 // passed to startTheWorldWithSema.
 //
 //go:systemstack
 func stopTheWorldWithSema(reason stwReason) worldStop {
+	// Mark the goroutine which called stopTheWorld preemptible so its
+	// stack may be scanned by the GC or observed by the execution tracer.
+	//
+	// This lets a mark worker scan us or the execution tracer take our
+	// stack while we try to stop the world since otherwise we could get
+	// in a mutual preemption deadlock.
+	//
+	// We must not modify anything on the G stack because a stack shrink
+	// may occur, now that we switched to _Gwaiting, specifically if we're
+	// doing this during the mark phase (mark termination excepted, since
+	// we know that stack scanning is done by that point). A stack shrink
+	// is otherwise OK though because in order to return from this function
+	// (and to leave the system stack) we must have preempted all
+	// goroutines, including any attempting to scan our stack, in which
+	// case, any stack shrinking will have already completed by the time we
+	// exit.
+	//
+	// N.B. The execution tracer is not aware of this status transition and
+	// handles it specially based on the wait reason.
+	casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
+
 	trace := traceAcquire()
 	if trace.ok() {
 		trace.STWStart(reason)
@@ -1659,6 +1666,9 @@ func stopTheWorldWithSema(reason stwReason) worldStop {
 
 	worldStopped()
 
+	// Switch back to _Grunning, now that the world is stopped.
+	casgstatus(getg().m.curg, _Gwaiting, _Grunning)
+
 	return worldStop{
 		reason:          reason,
 		startedStopping: start,
@@ -2023,15 +2033,23 @@ found:
 func forEachP(reason waitReason, fn func(*p)) {
 	systemstack(func() {
 		gp := getg().m.curg
-		// Mark the user stack as preemptible so that it may be scanned.
-		// Otherwise, our attempt to force all P's to a safepoint could
-		// result in a deadlock as we attempt to preempt a worker that's
-		// trying to preempt us (e.g. for a stack scan).
+		// Mark the user stack as preemptible so that it may be scanned
+		// by the GC or observed by the execution tracer. Otherwise, our
+		// attempt to force all P's to a safepoint could result in a
+		// deadlock as we attempt to preempt a goroutine that's trying
+		// to preempt us (e.g. for a stack scan).
 		//
 		// We must not modify anything on the G stack because a stack shrink
 		// may occur. A stack shrink is otherwise OK though because in order
 		// to return from this function (and to leave the system stack) we
 		// must have preempted all goroutines, including any attempting
 		// to scan our stack, in which case, any stack shrinking will
 		// have already completed by the time we exit.
 		//
 		// N.B. The execution tracer is not aware of this status
		// transition and handles it specially based on the
 		// wait reason.
-		casGToWaitingForGC(gp, _Grunning, reason)
+		casGToWaitingForSuspendG(gp, _Grunning, reason)
 		forEachPInternal(fn)
 		casgstatus(gp, _Gwaiting, _Grunning)
 	})
@@ -1147,17 +1147,17 @@ func (w waitReason) isMutexWait() bool {
 		w == waitReasonSyncRWMutexLock
 }
 
-func (w waitReason) isWaitingForGC() bool {
-	return isWaitingForGC[w]
+func (w waitReason) isWaitingForSuspendG() bool {
+	return isWaitingForSuspendG[w]
 }
 
-// isWaitingForGC indicates that a goroutine is only entering _Gwaiting and
-// setting a waitReason because it needs to be able to let the GC take ownership
-// of its stack. The G is always actually executing on the system stack, in
-// these cases.
+// isWaitingForSuspendG indicates that a goroutine is only entering _Gwaiting and
+// setting a waitReason because it needs to be able to let the suspendG
+// (used by the GC and the execution tracer) take ownership of its stack.
+// The G is always actually executing on the system stack in these cases.
 //
 // TODO(mknyszek): Consider replacing this with a new dedicated G status.
-var isWaitingForGC = [len(waitReasonStrings)]bool{
+var isWaitingForSuspendG = [len(waitReasonStrings)]bool{
 	waitReasonStoppingTheWorld:  true,
 	waitReasonGCMarkTermination: true,
 	waitReasonGarbageCollection: true,
@@ -1163,14 +1163,14 @@ func isShrinkStackSafe(gp *g) bool {
 		return false
 	}
 	// We also can't copy the stack while tracing is enabled, and
-	// gp is in _Gwaiting solely to make itself available to the GC.
+	// gp is in _Gwaiting solely to make itself available to suspendG.
 	// In these cases, the G is actually executing on the system
 	// stack, and the execution tracer may want to take a stack trace
 	// of the G's stack. Note: it's safe to access gp.waitreason here.
 	// We're only checking if this is true if we took ownership of the
 	// G with the _Gscan bit. This prevents the goroutine from transitioning,
 	// which prevents gp.waitreason from changing.
-	if traceEnabled() && readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForGC() {
+	if traceEnabled() && readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForSuspendG() {
 		return false
 	}
 	return true
@@ -375,7 +375,7 @@ func traceAdvance(stopTrace bool) {
 		me := getg().m.curg
 		// We don't have to handle this G status transition because we
 		// already eliminated ourselves from consideration above.
-		casGToWaitingForGC(me, _Grunning, waitReasonTraceGoroutineStatus)
+		casGToWaitingForSuspendG(me, _Grunning, waitReasonTraceGoroutineStatus)
 		// We need to suspend and take ownership of the G to safely read its
 		// goid. Note that we can't actually emit the event at this point
 		// because we might stop the G in a window where it's unsafe to write
@@ -160,11 +160,12 @@ func goStatusToTraceGoStatus(status uint32, wr waitReason) traceGoStatus {
 		// There are a number of cases where a G might end up in
 		// _Gwaiting but it's actually running in a non-preemptive
 		// state but needs to present itself as preempted to the
-		// garbage collector. In these cases, we're not going to
-		// emit an event, and we want these goroutines to appear in
-		// the final trace as if they're running, not blocked.
+		// garbage collector and traceAdvance (via suspendG). In
+		// these cases, we're not going to emit an event, and we
+		// want these goroutines to appear in the final trace as
+		// if they're running, not blocked.
 		tgs = traceGoWaiting
-		if status == _Gwaiting && wr.isWaitingForGC() {
+		if status == _Gwaiting && wr.isWaitingForSuspendG() {
 			tgs = traceGoRunning
 		}
 	case _Gdead:
@@ -468,6 +468,16 @@ func checkMergedShifts64(a [256]uint32, b [256]uint64, c [256]byte, v uint64) {
 	b[1] = b[(v>>20)&0xFF]
 	// ppc64x: "RLWNM", -"SLD"
 	b[2] = b[((uint64((uint32(v) >> 21)) & 0x3f) << 4)]
+	// ppc64x: -"RLWNM"
+	b[3] = (b[3] << 24) & 0xFFFFFF000000
+	// ppc64x: "RLWNM\t[$]24, R[0-9]+, [$]0, [$]7,"
+	b[4] = (b[4] << 24) & 0xFF000000
+	// ppc64x: "RLWNM\t[$]24, R[0-9]+, [$]0, [$]7,"
+	b[5] = (b[5] << 24) & 0xFF00000F
+	// ppc64x: -"RLWNM"
+	b[6] = (b[6] << 0) & 0xFF00000F
+	// ppc64x: "RLWNM\t[$]4, R[0-9]+, [$]28, [$]31,"
+	b[7] = (b[7] >> 28) & 0xF
 	// ppc64x: "RLWNM\t[$]11, R[0-9]+, [$]10, [$]15"
 	c[0] = c[((v>>5)&0x3F)<<16]
 	// ppc64x: "ANDCC\t[$]8064,"
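PPC64's RLWNM rotates the low 32 bits and ANDs with a contiguous mask, which is why a shift-then-mask like (x << 24) & 0xFF000000 can merge into one instruction when the mask is non-wrapping: the rotate differs from the shift only in bits the mask discards. A quick demonstration of that equivalence (a standalone sketch, not part of the test):

package main

import (
	"fmt"
	"math/bits"
)

// rlwnm mimics PPC64 RLWNM: rotate the low 32 bits left, then AND with a mask.
func rlwnm(x uint32, rot int, mask uint32) uint32 {
	return bits.RotateLeft32(x, rot) & mask
}

func main() {
	x := uint32(0x12345678)
	fmt.Printf("%#x\n", (x<<24)&0xFF000000)       // 0x78000000, shift then mask
	fmt.Printf("%#x\n", rlwnm(x, 24, 0xFF000000)) // same: wrapped-around bits are masked away
}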
test/fixedbugs/issue73748a.go (new file)
@@ -0,0 +1,32 @@
+// run
+
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"context"
+	"io"
+	"runtime/trace"
+)
+
+type T struct {
+	a [16]int
+}
+
+//go:noinline
+func f(x *T) {
+	*x = T{}
+}
+
+func main() {
+	trace.Start(io.Discard)
+	defer func() {
+		recover()
+		trace.Log(context.Background(), "a", "b")
+
+	}()
+	f(nil)
+}
test/fixedbugs/issue73748b.go (new file)
@@ -0,0 +1,32 @@
+// run
+
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"context"
+	"io"
+	"runtime/trace"
+)
+
+type T struct {
+	a [16]int
+}
+
+//go:noinline
+func f(x, y *T) {
+	*x = *y
+}
+
+func main() {
+	trace.Start(io.Discard)
+	defer func() {
+		recover()
+		trace.Log(context.Background(), "a", "b")
+
+	}()
+	f(nil, nil)
+}