Update to go1.23.3
@@ -208,6 +208,18 @@ func coroswitch_m(gp *g) {
 	// directly if possible.
 	setGNoWB(&mp.curg, gnext)
 	setMNoWB(&gnext.m, mp)
+
+	// Synchronize with any out-standing goroutine profile. We're about to start
+	// executing, and an invariant of the profiler is that we tryRecordGoroutineProfile
+	// whenever a goroutine is about to start running.
+	//
+	// N.B. We must do this before transitioning to _Grunning but after installing gnext
+	// in curg, so that we have a valid curg for allocation (tryRecordGoroutineProfile
+	// may allocate).
+	if goroutineProfile.active {
+		tryRecordGoroutineProfile(gnext, nil, osyield)
+	}
+
 	if !gnext.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) {
 		// The CAS failed: use casgstatus, which will take care of
 		// coordinating with the garbage collector about the state change.
@@ -1136,11 +1136,12 @@ func expandFrames(p []BlockProfileRecord) {
 	for i := range p {
 		cf := CallersFrames(p[i].Stack())
 		j := 0
-		for ; j < len(expandedStack); j++ {
+		for j < len(expandedStack) {
 			f, more := cf.Next()
 			// f.PC is a "call PC", but later consumers will expect
 			// "return PCs"
 			expandedStack[j] = f.PC + 1
+			j++
 			if !more {
 				break
 			}
@@ -1270,7 +1271,8 @@ func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok
 // of calling ThreadCreateProfile directly.
 func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
 	return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
-		copy(p[0].Stack0[:], r.Stack)
+		i := copy(p[0].Stack0[:], r.Stack)
+		clear(p[0].Stack0[i:])
 		p = p[1:]
 	})
 }
@@ -1649,7 +1651,8 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
 		return
 	}
 	for i, mr := range records[0:n] {
-		copy(p[i].Stack0[:], mr.Stack)
+		l := copy(p[i].Stack0[:], mr.Stack)
+		clear(p[i].Stack0[l:])
 	}
 	return
 }
@@ -879,8 +879,9 @@ func runPerThreadSyscall() {
 }
 
 const (
-	_SI_USER  = 0
-	_SI_TKILL = -6
+	_SI_USER     = 0
+	_SI_TKILL    = -6
+	_SYS_SECCOMP = 1
 )
 
 // sigFromUser reports whether the signal was sent because of a call
@@ -892,6 +893,14 @@ func (c *sigctxt) sigFromUser() bool {
 	return code == _SI_USER || code == _SI_TKILL
 }
 
+// sigFromSeccomp reports whether the signal was sent from seccomp.
+//
+//go:nosplit
+func (c *sigctxt) sigFromSeccomp() bool {
+	code := int32(c.sigcode())
+	return code == _SYS_SECCOMP
+}
+
 //go:nosplit
 func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32) {
 	r, _, err := syscall.Syscall6(syscall.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0)
@@ -13,3 +13,10 @@ package runtime
 func (c *sigctxt) sigFromUser() bool {
 	return c.sigcode() == _SI_USER
 }
+
+// sigFromSeccomp reports whether the signal was sent from seccomp.
+//
+//go:nosplit
+func (c *sigctxt) sigFromSeccomp() bool {
+	return false
+}
@@ -145,7 +145,7 @@ func TestMemoryProfiler(t *testing.T) {
 		}
 		t.Logf("Profile = %v", p)
 
-		stks := stacks(p)
+		stks := profileStacks(p)
 		for _, test := range tests {
 			if !containsStack(stks, test.stk) {
 				t.Fatalf("No matching stack entry for %q\n\nProfile:\n%v\n", test.stk, p)
@@ -15,6 +15,7 @@ import (
 	"internal/syscall/unix"
 	"internal/testenv"
 	"io"
+	"iter"
 	"math"
 	"math/big"
 	"os"
@@ -981,7 +982,7 @@ func TestBlockProfile(t *testing.T) {
 			t.Fatalf("invalid profile: %v", err)
 		}
 
-		stks := stacks(p)
+		stks := profileStacks(p)
 		for _, test := range tests {
 			if !containsStack(stks, test.stk) {
 				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
@@ -991,7 +992,7 @@ func TestBlockProfile(t *testing.T) {
 
 }
 
-func stacks(p *profile.Profile) (res [][]string) {
+func profileStacks(p *profile.Profile) (res [][]string) {
 	for _, s := range p.Sample {
 		var stk []string
 		for _, l := range s.Location {
@@ -1004,6 +1005,22 @@ func stacks(p *profile.Profile) (res [][]string) {
 	return res
 }
 
+func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) {
+	for _, record := range records {
+		frames := runtime.CallersFrames(record.Stack())
+		var stk []string
+		for {
+			frame, more := frames.Next()
+			stk = append(stk, frame.Function)
+			if !more {
+				break
+			}
+		}
+		res = append(res, stk)
+	}
+	return res
+}
+
 func containsStack(got [][]string, want []string) bool {
 	for _, stk := range got {
 		if len(stk) < len(want) {
@@ -1288,7 +1305,7 @@ func TestMutexProfile(t *testing.T) {
 			t.Fatalf("invalid profile: %v", err)
 		}
 
-		stks := stacks(p)
+		stks := profileStacks(p)
 		for _, want := range [][]string{
 			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"},
 		} {
@@ -1328,6 +1345,28 @@ func TestMutexProfile(t *testing.T) {
 			t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D)
 		}
 	})
+
+	t.Run("records", func(t *testing.T) {
+		// Record a mutex profile using the structured record API.
+		var records []runtime.BlockProfileRecord
+		for {
+			n, ok := runtime.MutexProfile(records)
+			if ok {
+				records = records[:n]
+				break
+			}
+			records = make([]runtime.BlockProfileRecord, n*2)
+		}
+
+		// Check that we see the same stack trace as the proto profile. For
+		// historical reason we expect a runtime.goexit root frame here that is
+		// omitted in the proto profile.
+		stks := blockRecordStacks(records)
+		want := []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1", "runtime.goexit"}
+		if !containsStack(stks, want) {
+			t.Errorf("No matching stack entry for %+v", want)
+		}
+	})
 }
 
 func TestMutexProfileRateAdjust(t *testing.T) {
@@ -1754,6 +1793,50 @@ func TestGoroutineProfileConcurrency(t *testing.T) {
 	}
 }
 
+// Regression test for #69998.
+func TestGoroutineProfileCoro(t *testing.T) {
+	testenv.MustHaveParallelism(t)
+
+	goroutineProf := Lookup("goroutine")
+
+	// Set up a goroutine to just create and run coroutine goroutines all day.
+	iterFunc := func() {
+		p, stop := iter.Pull2(
+			func(yield func(int, int) bool) {
+				for i := 0; i < 10000; i++ {
+					if !yield(i, i) {
+						return
+					}
+				}
+			},
+		)
+		defer stop()
+		for {
+			_, _, ok := p()
+			if !ok {
+				break
+			}
+		}
+	}
+	var wg sync.WaitGroup
+	done := make(chan struct{})
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			iterFunc()
+			select {
+			case <-done:
+			default:
+			}
+		}
+	}()
+
+	// Take a goroutine profile. If the bug in #69998 is present, this will crash
+	// with high probability. We don't care about the output for this bug.
+	goroutineProf.WriteTo(io.Discard, 1)
+}
+
 func BenchmarkGoroutine(b *testing.B) {
 	withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) {
 		return func(b *testing.B) {
@@ -2441,16 +2524,7 @@ func TestTimeVDSO(t *testing.T) {
 }
 
 func TestProfilerStackDepth(t *testing.T) {
-	// Disable sampling, otherwise it's difficult to assert anything.
-	oldMemRate := runtime.MemProfileRate
-	runtime.MemProfileRate = 1
-	runtime.SetBlockProfileRate(1)
-	oldMutexRate := runtime.SetMutexProfileFraction(1)
-	t.Cleanup(func() {
-		runtime.MemProfileRate = oldMemRate
-		runtime.SetBlockProfileRate(0)
-		runtime.SetMutexProfileFraction(oldMutexRate)
-	})
+	t.Cleanup(disableSampling())
 
 	const depth = 128
 	go produceProfileEvents(t, depth)
@@ -2478,7 +2552,7 @@ func TestProfilerStackDepth(t *testing.T) {
 			}
 			t.Logf("Profile = %v", p)
 
-			stks := stacks(p)
+			stks := profileStacks(p)
 			var stk []string
 			for _, s := range stks {
 				if hasPrefix(s, test.prefix) {
@@ -2742,3 +2816,84 @@ runtime/pprof.inlineA`,
 		})
 	}
 }
+
+func TestProfileRecordNullPadding(t *testing.T) {
+	// Produce events for the different profile types.
+	t.Cleanup(disableSampling())
+	memSink = make([]byte, 1)      // MemProfile
+	<-time.After(time.Millisecond) // BlockProfile
+	blockMutex(t)                  // MutexProfile
+	runtime.GC()
+
+	// Test that all profile records are null padded.
+	testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile)
+	testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile)
+	testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile)
+	testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) {
+		return runtime.MemProfile(p, true)
+	})
+	testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) {
+		return runtime.MemProfile(p, false)
+	})
+	// Not testing ThreadCreateProfile because it is broken, see issue 6104.
+}
+
+func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) {
+	stack0 := func(sr *T) *[32]uintptr {
+		switch t := any(sr).(type) {
+		case *runtime.StackRecord:
+			return &t.Stack0
+		case *runtime.MemProfileRecord:
+			return &t.Stack0
+		case *runtime.BlockProfileRecord:
+			return &t.Stack0
+		default:
+			panic(fmt.Sprintf("unexpected type %T", sr))
+		}
+	}
+
+	t.Run(name, func(t *testing.T) {
+		var p []T
+		for {
+			n, ok := fn(p)
+			if ok {
+				p = p[:n]
+				break
+			}
+			p = make([]T, n*2)
+			for i := range p {
+				s0 := stack0(&p[i])
+				for j := range s0 {
+					// Poison the Stack0 array to identify lack of zero padding
+					s0[j] = ^uintptr(0)
+				}
+			}
+		}
+
+		if len(p) == 0 {
+			t.Fatal("no records found")
+		}
+
+		for _, sr := range p {
+			for i, v := range stack0(&sr) {
+				if v == ^uintptr(0) {
+					t.Fatalf("record p[%d].Stack0 is not null padded: %+v", i, sr)
+				}
+			}
+		}
+	})
+}
+
+// disableSampling configures the profilers to capture all events, otherwise
+// it's difficult to assert anything.
+func disableSampling() func() {
+	oldMemRate := runtime.MemProfileRate
+	runtime.MemProfileRate = 1
+	runtime.SetBlockProfileRate(1)
+	oldMutexRate := runtime.SetMutexProfileFraction(1)
+	return func() {
+		runtime.MemProfileRate = oldMemRate
+		runtime.SetBlockProfileRate(0)
+		runtime.SetMutexProfileFraction(oldMutexRate)
+	}
+}
@@ -575,15 +575,15 @@ func TestGdbAutotmpTypes(t *testing.T) {
 
 	// Check that the backtrace matches the source code.
 	types := []string{
-		"[]main.astruct;",
-		"bucket<string,main.astruct>;",
-		"hash<string,main.astruct>;",
-		"main.astruct;",
-		"hash<string,main.astruct> * map[string]main.astruct;",
+		"[]main.astruct",
+		"bucket<string,main.astruct>",
+		"hash<string,main.astruct>",
+		"main.astruct",
+		"hash<string,main.astruct> * map[string]main.astruct",
 	}
 	for _, name := range types {
 		if !strings.Contains(sgot, name) {
-			t.Fatalf("could not find %s in 'info typrs astruct' output", name)
+			t.Fatalf("could not find %q in 'info typrs astruct' output", name)
 		}
 	}
 }
@@ -605,6 +605,19 @@ var crashing atomic.Int32
 var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
 var testSigusr1 func(gp *g) bool
 
+// sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065.
+var sigsysIgnored uint32
+
+//go:linkname ignoreSIGSYS os.ignoreSIGSYS
+func ignoreSIGSYS() {
+	atomic.Store(&sigsysIgnored, 1)
+}
+
+//go:linkname restoreSIGSYS os.restoreSIGSYS
+func restoreSIGSYS() {
+	atomic.Store(&sigsysIgnored, 0)
+}
+
 // sighandler is invoked when a signal occurs. The global g will be
 // set to a gsignal goroutine and we will be running on the alternate
 // signal stack. The parameter gp will be the value of the global g
@@ -715,6 +728,10 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 		return
 	}
 
+	if sig == _SIGSYS && c.sigFromSeccomp() && atomic.Load(&sigsysIgnored) != 0 {
+		return
+	}
+
 	if flags&_SigKill != 0 {
 		dieFromSignal(sig)
 	}
@@ -467,43 +467,37 @@ func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uint
 //go:linkname syscall_Syscall syscall.Syscall
 //go:nosplit
 func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
-	args := [...]uintptr{a1, a2, a3}
-	return syscall_SyscallN(fn, args[:nargs]...)
+	return syscall_syscalln(fn, nargs, a1, a2, a3)
 }
 
 //go:linkname syscall_Syscall6 syscall.Syscall6
 //go:nosplit
 func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
-	args := [...]uintptr{a1, a2, a3, a4, a5, a6}
-	return syscall_SyscallN(fn, args[:nargs]...)
+	return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6)
 }
 
 //go:linkname syscall_Syscall9 syscall.Syscall9
 //go:nosplit
 func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
-	args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9}
-	return syscall_SyscallN(fn, args[:nargs]...)
+	return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9)
 }
 
 //go:linkname syscall_Syscall12 syscall.Syscall12
 //go:nosplit
 func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) {
-	args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12}
-	return syscall_SyscallN(fn, args[:nargs]...)
+	return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12)
 }
 
 //go:linkname syscall_Syscall15 syscall.Syscall15
 //go:nosplit
 func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
-	args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
-	return syscall_SyscallN(fn, args[:nargs]...)
+	return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15)
 }
 
 //go:linkname syscall_Syscall18 syscall.Syscall18
 //go:nosplit
 func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) {
-	args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18}
-	return syscall_SyscallN(fn, args[:nargs]...)
+	return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18)
 }
 
 // maxArgs should be divisible by 2, as Windows stack
@@ -516,7 +510,15 @@ const maxArgs = 42
 //go:linkname syscall_SyscallN syscall.SyscallN
 //go:nosplit
 func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) {
-	if len(args) > maxArgs {
+	return syscall_syscalln(fn, uintptr(len(args)), args...)
+}
+
+//go:nosplit
+func syscall_syscalln(fn, n uintptr, args ...uintptr) (r1, r2, err uintptr) {
+	if n > uintptr(len(args)) {
+		panic("syscall: n > len(args)") // should not be reachable from user code
+	}
+	if n > maxArgs {
 		panic("runtime: SyscallN has too many arguments")
 	}
 
@@ -525,7 +527,7 @@ func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) {
 	// calls back into Go.
 	c := &getg().m.winsyscall
 	c.fn = fn
-	c.n = uintptr(len(args))
+	c.n = n
 	if c.n != 0 {
 		c.args = uintptr(noescape(unsafe.Pointer(&args[0])))
 	}
@@ -1215,6 +1215,13 @@ func TestBigStackCallbackSyscall(t *testing.T) {
 	}
 }
 
+func TestSyscallStackUsage(t *testing.T) {
+	// Test that the stack usage of a syscall doesn't exceed the limit.
+	// See https://go.dev/issue/69813.
+	syscall.Syscall15(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+	syscall.Syscall18(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+}
+
 // wantLoadLibraryEx reports whether we expect LoadLibraryEx to work for tests.
 func wantLoadLibraryEx() bool {
 	return testenv.Builder() != "" && (runtime.GOARCH == "amd64" || runtime.GOARCH == "386")
@@ -30,35 +30,6 @@ type timer struct {
 	state  uint8 // state bits
 	isChan bool  // timer has a channel; immutable; can be read without lock
 
-	// isSending is used to handle races between running a
-	// channel timer and stopping or resetting the timer.
-	// It is used only for channel timers (t.isChan == true).
-	// The lowest zero bit is set when about to send a value on the channel,
-	// and cleared after sending the value.
-	// The stop/reset code uses this to detect whether it
-	// stopped the channel send.
-	//
-	// An isSending bit is set only when t.mu is held.
-	// An isSending bit is cleared only when t.sendLock is held.
-	// isSending is read only when both t.mu and t.sendLock are held.
-	//
-	// Setting and clearing Uint8 bits handles the case of
-	// a timer that is reset concurrently with unlockAndRun.
-	// If the reset timer runs immediately, we can wind up with
-	// concurrent calls to unlockAndRun for the same timer.
-	// Using matched bit set and clear in unlockAndRun
-	// ensures that the value doesn't get temporarily out of sync.
-	//
-	// We use a uint8 to keep the timer struct small.
-	// This means that we can only support up to 8 concurrent
-	// runs of a timer, where a concurrent run can only occur if
-	// we start a run, unlock the timer, the timer is reset to a new
-	// value (or the ticker fires again), it is ready to run,
-	// and it is actually run, all before the first run completes.
-	// Since completing a run is fast, even 2 concurrent timer runs are
-	// nearly impossible, so this should be safe in practice.
-	isSending atomic.Uint8
-
 	blocked uint32 // number of goroutines blocked on timer's channel
 
 	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
@@ -98,6 +69,20 @@ type timer struct {
 	// sendLock protects sends on the timer's channel.
 	// Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0.
 	sendLock mutex
+
+	// isSending is used to handle races between running a
+	// channel timer and stopping or resetting the timer.
+	// It is used only for channel timers (t.isChan == true).
+	// It is not used for tickers.
+	// The value is incremented when about to send a value on the channel,
+	// and decremented after sending the value.
+	// The stop/reset code uses this to detect whether it
+	// stopped the channel send.
+	//
+	// isSending is incremented only when t.mu is held.
+	// isSending is decremented only when t.sendLock is held.
+	// isSending is read only when both t.mu and t.sendLock are held.
+	isSending atomic.Int32
 }
 
 // init initializes a newly allocated timer t.
@@ -467,7 +452,7 @@ func (t *timer) stop() bool {
 			// send from actually happening. That means
 			// that we should return true: the timer was
 			// stopped, even though t.when may be zero.
-			if t.isSending.Load() > 0 {
+			if t.period == 0 && t.isSending.Load() > 0 {
 				pending = true
 			}
 		}
@@ -529,6 +514,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in
 		t.maybeRunAsync()
 	}
 	t.trace("modify")
+	oldPeriod := t.period
 	t.period = period
 	if f != nil {
 		t.f = f
@@ -570,7 +556,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in
 			// send from actually happening. That means
 			// that we should return true: the timer was
 			// stopped, even though t.when may be zero.
-			if t.isSending.Load() > 0 {
+			if oldPeriod == 0 && t.isSending.Load() > 0 {
 				pending = true
 			}
 		}
@@ -1063,20 +1049,11 @@ func (t *timer) unlockAndRun(now int64) {
 	}
 
 	async := debug.asynctimerchan.Load() != 0
-	var isSendingClear uint8
-	if !async && t.isChan {
+	if !async && t.isChan && t.period == 0 {
 		// Tell Stop/Reset that we are sending a value.
-		// Set the lowest zero bit.
-		// We do this awkward step because atomic.Uint8
-		// doesn't support Add or CompareAndSwap.
-		// We only set bits with t locked.
-		v := t.isSending.Load()
-		i := sys.TrailingZeros8(^v)
-		if i == 8 {
+		if t.isSending.Add(1) < 0 {
 			throw("too many concurrent timer firings")
 		}
-		isSendingClear = 1 << i
-		t.isSending.Or(isSendingClear)
 	}
 
 	t.unlock()
@@ -1114,6 +1091,16 @@ func (t *timer) unlockAndRun(now int64) {
 		// started to send the value. That lets them correctly return
 		// true meaning that no value was sent.
 		lock(&t.sendLock)
+
+		if t.period == 0 {
+			// We are committed to possibly sending a value
+			// based on seq, so no need to keep telling
+			// stop/modify that we are sending.
+			if t.isSending.Add(-1) < 0 {
+				throw("mismatched isSending updates")
+			}
+		}
+
 		if t.seq != seq {
 			f = func(any, uintptr, int64) {}
 		}
@@ -1122,9 +1109,6 @@ func (t *timer) unlockAndRun(now int64) {
 	f(arg, seq, delay)
 
 	if !async && t.isChan {
-		// We are no longer sending a value.
-		t.isSending.And(^isSendingClear)
-
 		unlock(&t.sendLock)
 	}
 