!323 backport the upstream patch, fix the overflow issue in runtime.netpollWaiters

From: @fuowang 
Reviewed-by: @hcnbxx, @jing-rui 
Signed-off-by: @jing-rui
This commit is contained in:
openeuler-ci-bot 2024-03-29 02:17:58 +00:00 committed by Gitee
commit 61c189b7eb
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
3 changed files with 786 additions and 2 deletions

View File

@ -0,0 +1,144 @@
From cba06f2a50f09b9f35d8dd748be48b5e8d53fe03 Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor <iant@golang.org>
Date: Thu, 28 Mar 2024 17:25:23 +0800
Subject: [PATCH 1/2] [1.15 backport]runtime: decrement netpollWaiters in
netpollunblock
We used to decrement it in netpollgoready, but that missed
the common case of a descriptor becoming ready due to I/O.
All calls to netpollgoready go through netpollunblock,
so this shouldn't miss any decrements we missed before.
Fixes #60782
Change-Id: Ideefefa1ac96ca38e09fe2dd5d595c5dd7883237
Reviewed-on: https://go-review.googlesource.com/c/go/+/503923
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@google.com>
Run-TryBot: Ian Lance Taylor <iant@golang.org>
Run-TryBot: Ian Lance Taylor <iant@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Auto-Submit: Ian Lance Taylor <iant@google.com>
---
src/runtime/crash_test.go | 9 +++
src/runtime/netpoll.go | 4 +-
src/runtime/testdata/testprognet/waiters.go | 69 +++++++++++++++++++++
3 files changed, 81 insertions(+), 1 deletion(-)
create mode 100644 src/runtime/testdata/testprognet/waiters.go
diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go
index 34f30c9..263bd54 100644
--- a/src/runtime/crash_test.go
+++ b/src/runtime/crash_test.go
@@ -781,3 +781,12 @@ func TestDoublePanic(t *testing.T) {
}
}
}
+
+func TestNetpollWaiters(t *testing.T) {
+ t.Parallel()
+ output := runTestProg(t, "testprognet", "NetpollWaiters")
+ want := "OK\n"
+ if output != want {
+ t.Fatalf("output is not %q\n%s", want, output)
+ }
+}
diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go
index f296b0a..3611995 100644
--- a/src/runtime/netpoll.go
+++ b/src/runtime/netpoll.go
@@ -470,13 +470,15 @@ func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
// will check for timeout/cancel before waiting.
return nil
}
- var new uintptr
+ new := pdNil
if ioready {
new = pdReady
}
if atomic.Casuintptr(gpp, old, new) {
if old == pdWait {
old = 0
+ } else if old != 0 {
+ netpollWaiters.Add(-1)
}
return (*g)(unsafe.Pointer(old))
}
diff --git a/src/runtime/testdata/testprognet/waiters.go b/src/runtime/testdata/testprognet/waiters.go
new file mode 100644
index 0000000..480e872
--- /dev/null
+++ b/src/runtime/testdata/testprognet/waiters.go
@@ -0,0 +1,69 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "runtime/internal/atomic"
+ "sync"
+ "time"
+ _ "unsafe" // for go:linkname
+)
+
+// The bug is that netpollWaiters increases monotonically.
+// This doesn't cause a problem until it overflows.
+// Use linkname to see the value.
+//go:linkname netpollWaiters runtime.netpollWaiters
+var netpollWaiters uint32
+
+func init() {
+ register("NetpollWaiters", NetpollWaiters)
+}
+
+func NetpollWaiters() {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ conn, err := listener.Accept()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer conn.Close()
+ if _, err := io.Copy(io.Discard, conn); err != nil {
+ log.Fatal(err)
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ conn, err := net.Dial("tcp", listener.Addr().String())
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer conn.Close()
+ for i := 0; i < 10; i++ {
+ fmt.Fprintf(conn, "%d\n", i)
+ time.Sleep(time.Millisecond)
+ }
+ }()
+
+ wg.Wait()
+ if v := atomic.Load(&netpollWaiters); v != 0 {
+ log.Fatalf("current waiters %v", v)
+ }
+
+ fmt.Println("OK")
+}
+
--
2.27.0

View File

@ -0,0 +1,632 @@
From b90ed661ea423daf7a3e3f7bac8abb25add7e01d Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor <iant@golang.org>
Date: Thu, 28 Mar 2024 17:28:13 +0800
Subject: [PATCH 2/2] [1.15 backport]runtime: adjust netpollWaiters after
goroutines are ready
The runtime was adjusting netpollWaiters before the waiting
goroutines were marked as ready. This could cause the scheduler
to report a deadlock because there were no goroutines ready to run.
Keeping netpollWaiters non-zero ensures that at least one goroutine
will call netpoll(-1) from findRunnable.
This does mean that if a program has network activity for a while
and then never has it again, and also has no timers, then we can leave
an M stranded in a call to netpoll from which it will never return.
At least this won't be a common case. And it's not new; this has been
a potential problem for some time.
Fixes #61454
Change-Id: I17c7f891c2bb1262fda12c6929664e64686463c8
Reviewed-on: https://go-review.googlesource.com/c/go/+/511455
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Ian Lance Taylor <iant@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Auto-Submit: Ian Lance Taylor <iant@golang.org>
Reviewed-by: Heschi Kreinick <heschi@google.com>
---
src/runtime/netpoll.go | 68 +++++++++++++++++++++++++---------
src/runtime/netpoll_aix.go | 11 +++---
src/runtime/netpoll_epoll.go | 11 +++---
src/runtime/netpoll_fake.go | 4 +-
src/runtime/netpoll_kqueue.go | 11 +++---
src/runtime/netpoll_solaris.go | 11 +++---
src/runtime/netpoll_stub.go | 12 ++++--
src/runtime/netpoll_windows.go | 15 ++++----
src/runtime/proc.go | 22 +++++++----
9 files changed, 107 insertions(+), 58 deletions(-)
diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go
index 3611995..5f74422 100644
--- a/src/runtime/netpoll.go
+++ b/src/runtime/netpoll.go
@@ -22,10 +22,12 @@ import (
// Arm edge-triggered notifications for fd. The pd argument is to pass
// back to netpollready when fd is ready. Return an errno value.
//
-// func netpoll(delta int64) gList
+// func netpoll(delta int64) (gList, int32)
// Poll the network. If delta < 0, block indefinitely. If delta == 0,
// poll without blocking. If delta > 0, block for up to delta nanoseconds.
-// Return a list of goroutines built by calling netpollready.
+// Return a list of goroutines built by calling netpollready,
+// and a delta to add to netpollWaiters when all goroutines are ready.
+// This will never return an empty list with a non-zero delta.
//
// func netpollBreak()
// Wake up the network poller, assumed to be blocked in netpoll.
@@ -53,8 +55,9 @@ const (
// G pointer - the goroutine is blocked on the semaphore;
// io notification or timeout/close changes the state to pdReady or nil respectively
// and unparks the goroutine.
-// nil - none of the above.
+// pdNil - none of the above.
const (
+ pdNil uintptr = 0
pdReady uintptr = 1
pdWait uintptr = 2
)
@@ -308,14 +311,16 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
}
}
// If we set the new deadline in the past, unblock currently pending IO if any.
+ // Note that pd.publishInfo has already been called, above, immediately after modifying rd and wd.
+ delta := int32(0)
var rg, wg *g
if pd.rd < 0 || pd.wd < 0 {
atomic.StorepNoWB(noescape(unsafe.Pointer(&wg)), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
if pd.rd < 0 {
- rg = netpollunblock(pd, 'r', false)
+ rg = netpollunblock(pd, 'r', false, &delta)
}
if pd.wd < 0 {
- wg = netpollunblock(pd, 'w', false)
+ wg = netpollunblock(pd, 'w', false, &delta)
}
}
unlock(&pd.lock)
@@ -325,6 +330,7 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
if wg != nil {
netpollgoready(wg, 3)
}
+ netpollAdjustWaiters(delta)
}
//go:linkname poll_runtime_pollUnblock internal/poll.runtime_pollUnblock
@@ -338,8 +344,9 @@ func poll_runtime_pollUnblock(pd *pollDesc) {
pd.wseq++
var rg, wg *g
atomic.StorepNoWB(noescape(unsafe.Pointer(&rg)), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock
- rg = netpollunblock(pd, 'r', false)
- wg = netpollunblock(pd, 'w', false)
+ delta := int32(0)
+ rg = netpollunblock(pd, 'r', false, &delta)
+ wg = netpollunblock(pd, 'w', false, &delta)
if pd.rt.f != nil {
deltimer(&pd.rt)
pd.rt.f = nil
@@ -355,6 +362,7 @@ func poll_runtime_pollUnblock(pd *pollDesc) {
if wg != nil {
netpollgoready(wg, 3)
}
+ netpollAdjustWaiters(delta)
}
// netpollready is called by the platform-specific netpoll function.
@@ -363,15 +371,18 @@ func poll_runtime_pollUnblock(pd *pollDesc) {
// from netpoll. The mode argument is 'r', 'w', or 'r'+'w' to indicate
// whether the fd is ready for reading or writing or both.
//
+// This returns a delta to apply to netpollWaiters.
+//
// This may run while the world is stopped, so write barriers are not allowed.
//go:nowritebarrier
-func netpollready(toRun *gList, pd *pollDesc, mode int32) {
+func netpollready(toRun *gList, pd *pollDesc, mode int32) int32 {
+ delta := int32(0)
var rg, wg *g
if mode == 'r' || mode == 'r'+'w' {
- rg = netpollunblock(pd, 'r', true)
+ rg = netpollunblock(pd, 'r', true, &delta)
}
if mode == 'w' || mode == 'r'+'w' {
- wg = netpollunblock(pd, 'w', true)
+ wg = netpollunblock(pd, 'w', true, &delta)
}
if rg != nil {
toRun.push(rg)
@@ -379,6 +390,7 @@ func netpollready(toRun *gList, pd *pollDesc, mode int32) {
if wg != nil {
toRun.push(wg)
}
+ return delta
}
func netpollcheckerr(pd *pollDesc, mode int32) int {
@@ -403,7 +415,7 @@ func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
// Bump the count of goroutines waiting for the poller.
// The scheduler uses this to decide whether to block
// waiting for the poller if there is nothing else to do.
- atomic.Xadd(&netpollWaiters, 1)
+ netpollAdjustWaiters(1)
}
return r
}
@@ -454,7 +466,13 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
return old == pdReady
}
-func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
+// netpollunblock moves either pd.rg (if mode == 'r') or
+// pd.wg (if mode == 'w') into the pdReady state.
+// This returns any goroutine blocked on pd.{rg,wg}.
+// It adds any adjustment to netpollWaiters to *delta;
+// this adjustment should be applied after the goroutine has
+// been marked ready.
+func netpollunblock(pd *pollDesc, mode int32, ioready bool, delta *int32) *g {
gpp := &pd.rg
if mode == 'w' {
gpp = &pd.wg
@@ -465,7 +483,7 @@ func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
if old == pdReady {
return nil
}
- if old == 0 && !ioready {
+ if old == pdNil && !ioready {
// Only set pdReady for ioready. runtime_pollWait
// will check for timeout/cancel before waiting.
return nil
@@ -476,9 +494,9 @@ func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
}
if atomic.Casuintptr(gpp, old, new) {
if old == pdWait {
- old = 0
- } else if old != 0 {
- netpollWaiters.Add(-1)
+ old = pdNil
+ } else if old != pdNil {
+ *delta -= 1
}
return (*g)(unsafe.Pointer(old))
}
@@ -498,6 +516,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
unlock(&pd.lock)
return
}
+ delta := int32(0)
var rg *g
if read {
if pd.rd <= 0 || pd.rt.f == nil {
@@ -505,7 +524,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
}
pd.rd = -1
atomic.StorepNoWB(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
- rg = netpollunblock(pd, 'r', false)
+ rg = netpollunblock(pd, 'r', false, &delta)
}
var wg *g
if write {
@@ -514,7 +533,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
}
pd.wd = -1
atomic.StorepNoWB(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
- wg = netpollunblock(pd, 'w', false)
+ wg = netpollunblock(pd, 'w', false, &delta)
}
unlock(&pd.lock)
if rg != nil {
@@ -523,6 +542,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
if wg != nil {
netpollgoready(wg, 0)
}
+ netpollAdjustWaiters(delta)
}
func netpollDeadline(arg interface{}, seq uintptr) {
@@ -537,6 +557,18 @@ func netpollWriteDeadline(arg interface{}, seq uintptr) {
netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
}
+// netpollAnyWaiters reports whether any goroutines are waiting for I/O.
+func netpollAnyWaiters() bool {
+ return atomic.Load(&netpollWaiters) > 0
+}
+
+// netpollAdjustWaiters adds delta to netpollWaiters.
+func netpollAdjustWaiters(delta int32) {
+ if delta != 0 {
+ atomic.Xadd(&netpollWaiters, delta)
+ }
+}
+
func (c *pollCache) alloc() *pollDesc {
lock(&c.lock)
if c.first == nil {
diff --git a/src/runtime/netpoll_aix.go b/src/runtime/netpoll_aix.go
index 4590ed8..a94e098 100644
--- a/src/runtime/netpoll_aix.go
+++ b/src/runtime/netpoll_aix.go
@@ -147,13 +147,13 @@ func netpollBreak() {
// delay == 0: does not block, just polls
// delay > 0: block for up to that many nanoseconds
//go:nowritebarrierrec
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
var timeout uintptr
if delay < 0 {
timeout = ^uintptr(0)
} else if delay == 0 {
// TODO: call poll with timeout == 0
- return gList{}
+ return gList{}, 0
} else if delay < 1e6 {
timeout = 1
} else if delay < 1e15 {
@@ -179,7 +179,7 @@ retry:
// If a timed sleep was interrupted, just return to
// recalculate how long we should sleep now.
if timeout > 0 {
- return gList{}
+ return gList{}, 0
}
goto retry
}
@@ -199,6 +199,7 @@ retry:
n--
}
var toRun gList
+ delta := int32(0)
for i := 1; i < len(pfds) && n > 0; i++ {
pfd := &pfds[i]
@@ -216,10 +217,10 @@ retry:
if pfd.revents == _POLLERR {
pds[i].everr = true
}
- netpollready(&toRun, pds[i], mode)
+ delta += netpollready(&toRun, pds[i], mode)
n--
}
}
unlock(&mtxset)
- return toRun
+ return toRun, delta
}
diff --git a/src/runtime/netpoll_epoll.go b/src/runtime/netpoll_epoll.go
index 58f4fa8..2fd7caa 100644
--- a/src/runtime/netpoll_epoll.go
+++ b/src/runtime/netpoll_epoll.go
@@ -103,9 +103,9 @@ func netpollBreak() {
// delay < 0: blocks indefinitely
// delay == 0: does not block, just polls
// delay > 0: block for up to that many nanoseconds
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
if epfd == -1 {
- return gList{}
+ return gList{}, 0
}
var waitms int32
if delay < 0 {
@@ -132,11 +132,12 @@ retry:
// If a timed sleep was interrupted, just return to
// recalculate how long we should sleep now.
if waitms > 0 {
- return gList{}
+ return gList{}, 0
}
goto retry
}
var toRun gList
+ delta := int32(0)
for i := int32(0); i < n; i++ {
ev := &events[i]
if ev.events == 0 {
@@ -172,8 +173,8 @@ retry:
if ev.events == _EPOLLERR {
pd.everr = true
}
- netpollready(&toRun, pd, mode)
+ delta += netpollready(&toRun, pd, mode)
}
}
- return toRun
+ return toRun, delta
}
diff --git a/src/runtime/netpoll_fake.go b/src/runtime/netpoll_fake.go
index b2af3b8..fda8a5c 100644
--- a/src/runtime/netpoll_fake.go
+++ b/src/runtime/netpoll_fake.go
@@ -30,6 +30,6 @@ func netpollarm(pd *pollDesc, mode int) {
func netpollBreak() {
}
-func netpoll(delay int64) gList {
- return gList{}
+func netpoll(delay int64) (gList, int32) {
+ return gList{}, 0
}
diff --git a/src/runtime/netpoll_kqueue.go b/src/runtime/netpoll_kqueue.go
index 3bd93c1..995a880 100644
--- a/src/runtime/netpoll_kqueue.go
+++ b/src/runtime/netpoll_kqueue.go
@@ -104,9 +104,9 @@ func netpollBreak() {
// delay < 0: blocks indefinitely
// delay == 0: does not block, just polls
// delay > 0: block for up to that many nanoseconds
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
if kq == -1 {
- return gList{}
+ return gList{}, 0
}
var tp *timespec
var ts timespec
@@ -133,11 +133,12 @@ retry:
// If a timed sleep was interrupted, just return to
// recalculate how long we should sleep now.
if delay > 0 {
- return gList{}
+ return gList{}, 0
}
goto retry
}
var toRun gList
+ delta := int32(0)
for i := 0; i < int(n); i++ {
ev := &events[i]
@@ -183,8 +184,8 @@ retry:
if ev.flags == _EV_ERROR {
pd.everr = true
}
- netpollready(&toRun, pd, mode)
+ delta += netpollready(&toRun, pd, mode)
}
}
- return toRun
+ return toRun, delta
}
diff --git a/src/runtime/netpoll_solaris.go b/src/runtime/netpoll_solaris.go
index d217d5b..dbfa162 100644
--- a/src/runtime/netpoll_solaris.go
+++ b/src/runtime/netpoll_solaris.go
@@ -211,9 +211,9 @@ func netpollBreak() {
// delay < 0: blocks indefinitely
// delay == 0: does not block, just polls
// delay > 0: block for up to that many nanoseconds
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
if portfd == -1 {
- return gList{}
+ return gList{}, 0
}
var wait *timespec
@@ -251,12 +251,13 @@ retry:
// If a timed sleep was interrupted and there are no events,
// just return to recalculate how long we should sleep now.
if delay > 0 {
- return gList{}
+ return gList{}, 0
}
goto retry
}
var toRun gList
+ delta := int32(0)
for i := 0; i < int(n); i++ {
ev := &events[i]
@@ -311,9 +312,9 @@ retry:
// about the event port on SmartOS.
//
// See golang.org/x/issue/30840.
- netpollready(&toRun, pd, mode)
+ delta += netpollready(&toRun, pd, mode)
}
}
- return toRun
+ return toRun, delta
}
diff --git a/src/runtime/netpoll_stub.go b/src/runtime/netpoll_stub.go
index 3599f2d..ed0bf5e 100644
--- a/src/runtime/netpoll_stub.go
+++ b/src/runtime/netpoll_stub.go
@@ -9,7 +9,6 @@ package runtime
import "runtime/internal/atomic"
var netpollInited uint32
-var netpollWaiters uint32
var netpollStubLock mutex
var netpollNote note
@@ -34,7 +33,7 @@ func netpollBreak() {
// Polls for ready network connections.
// Returns list of goroutines that become runnable.
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
// Implementation for platforms that do not support
// integrated network poller.
if delay != 0 {
@@ -53,9 +52,16 @@ func netpoll(delay int64) gList {
// (eg when running TestNetpollBreak).
osyield()
}
- return gList{}
+ return gList{}, 0
}
func netpollinited() bool {
return atomic.Load(&netpollInited) != 0
}
+
+func netpollAnyWaiters() bool {
+ return false
+}
+
+func netpollAdjustWaiters(delta int32) {
+}
diff --git a/src/runtime/netpoll_windows.go b/src/runtime/netpoll_windows.go
index 4c1cd26..3782254 100644
--- a/src/runtime/netpoll_windows.go
+++ b/src/runtime/netpoll_windows.go
@@ -80,7 +80,7 @@ func netpollBreak() {
// delay < 0: blocks indefinitely
// delay == 0: does not block, just polls
// delay > 0: block for up to that many nanoseconds
-func netpoll(delay int64) gList {
+func netpoll(delay int64) (gList, int32) {
var entries [64]overlappedEntry
var wait, qty, flags, n, i uint32
var errno int32
@@ -90,7 +90,7 @@ func netpoll(delay int64) gList {
mp := getg().m
if iocphandle == _INVALID_HANDLE_VALUE {
- return gList{}
+ return gList{}, 0
}
if delay < 0 {
wait = _INFINITE
@@ -117,12 +117,13 @@ func netpoll(delay int64) gList {
mp.blocked = false
errno = int32(getlasterror())
if errno == _WAIT_TIMEOUT {
- return gList{}
+ return gList{}, 0
}
println("runtime: GetQueuedCompletionStatusEx failed (errno=", errno, ")")
throw("runtime: netpoll failed")
}
mp.blocked = false
+ delta := int32(0)
for i = 0; i < n; i++ {
op = entries[i].op
if op != nil {
@@ -131,7 +132,7 @@ func netpoll(delay int64) gList {
if stdcall5(_WSAGetOverlappedResult, op.pd.fd, uintptr(unsafe.Pointer(op)), uintptr(unsafe.Pointer(&qty)), 0, uintptr(unsafe.Pointer(&flags))) == 0 {
errno = int32(getlasterror())
}
- handlecompletion(&toRun, op, errno, qty)
+ delta += handlecompletion(&toRun, op, errno, qty)
} else {
atomic.Store(&netpollWakeSig, 0)
if delay == 0 {
@@ -141,10 +142,10 @@ func netpoll(delay int64) gList {
}
}
}
- return toRun
+ return toRun, delta
}
-func handlecompletion(toRun *gList, op *net_op, errno int32, qty uint32) {
+func handlecompletion(toRun *gList, op *net_op, errno int32, qty uint32) int32 {
mode := op.mode
if mode != 'r' && mode != 'w' {
println("runtime: GetQueuedCompletionStatusEx returned invalid mode=", mode)
@@ -152,5 +153,5 @@ func handlecompletion(toRun *gList, op *net_op, errno int32, qty uint32) {
}
op.errno = errno
op.qty = qty
- netpollready(toRun, op.pd, mode)
+ return netpollready(toRun, op.pd, mode)
}
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index c0b961f..a520cba 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -1113,8 +1113,9 @@ func stopTheWorldWithSema() {
func startTheWorldWithSema(emitTraceEvent bool) int64 {
mp := acquirem() // disable preemption because it can be holding p in a local var
if netpollinited() {
- list := netpoll(0) // non-blocking
+ list, delta := netpoll(0) // non-blocking
injectglist(&list)
+ netpollAdjustWaiters(delta)
}
lock(&sched.lock)
@@ -2297,10 +2298,11 @@ top:
// blocked thread (e.g. it has already returned from netpoll, but does
// not set lastpoll yet), this thread will do blocking netpoll below
// anyway.
- if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
- if list := netpoll(0); !list.empty() { // non-blocking
+ if netpollinited() && netpollAnyWaiters() && atomic.Load64(&sched.lastpoll) != 0 {
+ if list, delta := netpoll(0); !list.empty() { // non-blocking
gp := list.pop()
injectglist(&list)
+ netpollAdjustWaiters(delta)
casgstatus(gp, _Gwaiting, _Grunnable)
if trace.enabled {
traceGoUnpark(gp, 0)
@@ -2494,7 +2496,7 @@ stop:
}
// poll network
- if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
+ if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
atomic.Store64(&sched.pollUntil, uint64(pollUntil))
if _g_.m.p != 0 {
throw("findrunnable: netpoll with p")
@@ -2506,7 +2508,7 @@ stop:
// When using fake time, just poll.
delta = 0
}
- list := netpoll(delta) // block until new work is available
list, wdelta := netpoll(delta) // block until new work is available
atomic.Store64(&sched.pollUntil, 0)
atomic.Store64(&sched.lastpoll, uint64(nanotime()))
if faketime != 0 && list.empty() {
@@ -2520,11 +2522,13 @@ stop:
unlock(&sched.lock)
if _p_ == nil {
injectglist(&list)
+ netpollAdjustWaiters(wdelta)
} else {
acquirep(_p_)
if !list.empty() {
gp := list.pop()
injectglist(&list)
+ netpollAdjustWaiters(wdelta)
casgstatus(gp, _Gwaiting, _Grunnable)
if trace.enabled {
traceGoUnpark(gp, 0)
@@ -2559,9 +2563,10 @@ func pollWork() bool {
if !runqempty(p) {
return true
}
- if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
- if list := netpoll(0); !list.empty() {
+ if netpollinited() && netpollAnyWaiters() && sched.lastpoll != 0 {
+ if list, delta := netpoll(0); !list.empty() {
injectglist(&list)
+ netpollAdjustWaiters(delta)
return true
}
}
@@ -4768,7 +4773,7 @@ func sysmon() {
lastpoll := int64(atomic.Load64(&sched.lastpoll))
if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
- list := netpoll(0) // non-blocking - returns list of goroutines
+ list, delta := netpoll(0) // non-blocking - returns list of goroutines
if !list.empty() {
// Need to decrement number of idle locked M's
// (pretending that one more is running) before injectglist.
@@ -4780,6 +4785,7 @@ func sysmon() {
incidlelocked(-1)
injectglist(&list)
incidlelocked(1)
+ netpollAdjustWaiters(delta)
}
}
if next < now {
--
2.27.0

View File

@ -58,7 +58,7 @@
Name: golang
Version: 1.15.7
Release: 41
Release: 42
Summary: The Go Programming Language
License: BSD and Public Domain
URL: https://golang.org/
@ -259,6 +259,8 @@ Patch6114: 0114-release-branch.go1.21-html-template-escape-additiona.patch
Patch6115: 0115-net-textproto-mime-multipart-avoid-unbounded-read-in.patch
Patch6116: 0116-release-branch.go1.21-net-http-net-http-cookiejar-av.patch
Patch6117: 0117-Backport-net-mail-properly-handle-special-characters.patch
Patch6118: 0118-1.15-backport-runtime-decrement-netpollWaiters-in-ne.patch
Patch6119: 0119-1.15-backport-runtime-adjust-netpollWaiters-after-go.patch
Patch9001: 0001-drop-hard-code-cert.patch
Patch9002: 0002-fix-patch-cmd-go-internal-modfetch-do-not-sho.patch
@ -498,6 +500,12 @@ fi
%files devel -f go-tests.list -f go-misc.list -f go-src.list
%changelog
* Thu Mar 28 2024 wangshuo <wangshuo@kylinos.cn> - 1.15.7-42
- Type:bugfix
- ID:NA
- SUG:NA
- DESC:backport the upstream patch, fix the overflow issue in runtime.netpollWaiters
* Thu Mar 28 2024 hanchao <hanchao63@huawei.com> - 1.15.7-41
- Type:CVE
- CVE:CVE-2024-24784
@ -633,7 +641,7 @@ fi
- SUG:NA
- DESC:fix CVE-2022-27664
* Thu Spe 8 2022 hanchao<hanchao47@huawei.com> - 1.15.7-18
* Thu Sep 8 2022 hanchao<hanchao47@huawei.com> - 1.15.7-18
- Type:bugfix
- CVE:NA
- SUG:NA