QEMU update to version 4.1.0-86:
- nbd/server: CVE-2024-7409: Close stray clients at server-stop - main-loop.h: introduce qemu_in_main_thread() - aio-wait.h: introduce AIO_WAIT_WHILE_UNLOCKED - nbd/server: CVE-2024-7409: Drop non-negotiating clients - nbd/server: CVE-2024-7409: Cap default max-connections to 100 - nbd: Add max-connections to nbd-server-start - nbd/server: Plumb in new args to nbd_client_add() - nbd: Minor style and typo fixes Signed-off-by: Jiabo Feng <fengjiabo1@huawei.com>
This commit is contained in:
parent
fe82f9e345
commit
c4360a84fc
79
aio-wait.h-introduce-AIO_WAIT_WHILE_UNLOCKED.patch
Normal file
79
aio-wait.h-introduce-AIO_WAIT_WHILE_UNLOCKED.patch
Normal file
@@ -0,0 +1,79 @@
|
||||
From 3257ee8757c6187901cbe287fbd3e7fecc321f37 Mon Sep 17 00:00:00 2001
|
||||
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
|
||||
Date: Mon, 26 Sep 2022 05:31:57 -0400
|
||||
Subject: [PATCH 6/8] aio-wait.h: introduce AIO_WAIT_WHILE_UNLOCKED
|
||||
|
||||
Same as AIO_WAIT_WHILE macro, but if we are in the Main loop
|
||||
do not release and then acquire ctx_'s AioContext.
|
||||
|
||||
Once all AioContext locks go away, this macro will replace
|
||||
AIO_WAIT_WHILE.
|
||||
|
||||
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
|
||||
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
|
||||
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
|
||||
Message-Id: <20220926093214.506243-5-eesposit@redhat.com>
|
||||
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
|
||||
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
|
||||
---
|
||||
include/block/aio-wait.h | 17 +++++++++++++----
|
||||
1 file changed, 13 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
|
||||
index 716d2639df..3c0cad35fb 100644
|
||||
--- a/include/block/aio-wait.h
|
||||
+++ b/include/block/aio-wait.h
|
||||
@@ -59,10 +59,13 @@ typedef struct {
|
||||
extern AioWait global_aio_wait;
|
||||
|
||||
/**
|
||||
- * AIO_WAIT_WHILE:
|
||||
+ * AIO_WAIT_WHILE_INTERNAL:
|
||||
* @ctx: the aio context, or NULL if multiple aio contexts (for which the
|
||||
* caller does not hold a lock) are involved in the polling condition.
|
||||
* @cond: wait while this conditional expression is true
|
||||
+ * @unlock: whether to unlock and then lock again @ctx. This applies
|
||||
+ * only when waiting for another AioContext from the main loop.
|
||||
+ * Otherwise it's ignored.
|
||||
*
|
||||
* Wait while a condition is true. Use this to implement synchronous
|
||||
* operations that require event loop activity.
|
||||
@@ -75,7 +78,7 @@ extern AioWait global_aio_wait;
|
||||
* wait on conditions between two IOThreads since that could lead to deadlock,
|
||||
* go via the main loop instead.
|
||||
*/
|
||||
-#define AIO_WAIT_WHILE(ctx, cond) ({ \
|
||||
+#define AIO_WAIT_WHILE_INTERNAL(ctx, cond, unlock) ({ \
|
||||
bool waited_ = false; \
|
||||
AioWait *wait_ = &global_aio_wait; \
|
||||
AioContext *ctx_ = (ctx); \
|
||||
@@ -90,11 +93,11 @@ extern AioWait global_aio_wait;
|
||||
assert(qemu_get_current_aio_context() == \
|
||||
qemu_get_aio_context()); \
|
||||
while ((cond)) { \
|
||||
- if (ctx_) { \
|
||||
+ if (unlock && ctx_) { \
|
||||
aio_context_release(ctx_); \
|
||||
} \
|
||||
aio_poll(qemu_get_aio_context(), true); \
|
||||
- if (ctx_) { \
|
||||
+ if (unlock && ctx_) { \
|
||||
aio_context_acquire(ctx_); \
|
||||
} \
|
||||
waited_ = true; \
|
||||
@@ -103,6 +106,12 @@ extern AioWait global_aio_wait;
|
||||
atomic_dec(&wait_->num_waiters); \
|
||||
waited_; })
|
||||
|
||||
+#define AIO_WAIT_WHILE(ctx, cond) \
|
||||
+ AIO_WAIT_WHILE_INTERNAL(ctx, cond, true)
|
||||
+
|
||||
+#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \
|
||||
+ AIO_WAIT_WHILE_INTERNAL(ctx, cond, false)
|
||||
+
|
||||
/**
|
||||
* aio_wait_kick:
|
||||
* Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During
|
||||
--
|
||||
2.45.1.windows.1
|
||||
|
||||
116
main-loop.h-introduce-qemu_in_main_thread.patch
Normal file
116
main-loop.h-introduce-qemu_in_main_thread.patch
Normal file
@@ -0,0 +1,116 @@
|
||||
From f0cbb49e2a44bcf5a515922b96853acc1bed3b79 Mon Sep 17 00:00:00 2001
|
||||
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
|
||||
Date: Thu, 3 Mar 2022 10:15:46 -0500
|
||||
Subject: [PATCH 7/8] main-loop.h: introduce qemu_in_main_thread()
|
||||
|
||||
When invoked from the main loop, this function is the same
|
||||
as qemu_mutex_iothread_locked, and returns true if the BQL is held.
|
||||
When invoked from iothreads or tests, it returns true only
|
||||
if the current AioContext is the Main Loop.
|
||||
|
||||
This essentially just extends qemu_mutex_iothread_locked to work
|
||||
also in unit tests or other users like storage-daemon, that run
|
||||
in the Main Loop but end up using the implementation in
|
||||
stubs/iothread-lock.c.
|
||||
|
||||
Using qemu_mutex_iothread_locked in unit tests defaults to false
|
||||
because they use the implementation in stubs/iothread-lock,
|
||||
making all assertions added in next patches fail despite the
|
||||
AioContext is still the main loop.
|
||||
|
||||
See the comment in the function header for more information.
|
||||
|
||||
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
|
||||
Message-Id: <20220303151616.325444-2-eesposit@redhat.com>
|
||||
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
|
||||
---
|
||||
cpus.c | 5 +++++
|
||||
include/qemu/main-loop.h | 24 ++++++++++++++++++++++++
|
||||
stubs/Makefile.objs | 1 +
|
||||
stubs/iothread-lock-block.c | 8 ++++++++
|
||||
4 files changed, 38 insertions(+)
|
||||
create mode 100644 stubs/iothread-lock-block.c
|
||||
|
||||
diff --git a/cpus.c b/cpus.c
|
||||
index b2a26a1f11..d2d486129d 100644
|
||||
--- a/cpus.c
|
||||
+++ b/cpus.c
|
||||
@@ -1848,6 +1848,11 @@ bool qemu_mutex_iothread_locked(void)
|
||||
return iothread_locked;
|
||||
}
|
||||
|
||||
+bool qemu_in_main_thread(void)
|
||||
+{
|
||||
+ return qemu_mutex_iothread_locked();
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* The BQL is taken from so many places that it is worth profiling the
|
||||
* callers directly, instead of funneling them all through a single function.
|
||||
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
|
||||
index cba048bc82..42d65cf270 100644
|
||||
--- a/include/qemu/main-loop.h
|
||||
+++ b/include/qemu/main-loop.h
|
||||
@@ -260,9 +260,33 @@ int qemu_add_child_watch(pid_t pid);
|
||||
* must always be taken outside other locks. This function helps
|
||||
* functions take different paths depending on whether the current
|
||||
* thread is running within the main loop mutex.
|
||||
+ *
|
||||
+ * This function should never be used in the block layer, because
|
||||
+ * unit tests, block layer tools and qemu-storage-daemon do not
|
||||
+ * have a BQL.
|
||||
+ * Please instead refer to qemu_in_main_thread().
|
||||
*/
|
||||
bool qemu_mutex_iothread_locked(void);
|
||||
|
||||
+/**
|
||||
+ * qemu_in_main_thread: return whether it's possible to safely access
|
||||
+ * the global state of the block layer.
|
||||
+ *
|
||||
+ * Global state of the block layer is not accessible from I/O threads
|
||||
+ * or worker threads; only from threads that "own" the default
|
||||
+ * AioContext that qemu_get_aio_context() returns. For tests, block
|
||||
+ * layer tools and qemu-storage-daemon there is a designated thread that
|
||||
+ * runs the event loop for qemu_get_aio_context(), and that is the
|
||||
+ * main thread.
|
||||
+ *
|
||||
+ * For emulators, however, any thread that holds the BQL can act
|
||||
+ * as the block layer main thread; this will be any of the actual
|
||||
+ * main thread, the vCPU threads or the RCU thread.
|
||||
+ *
|
||||
+ * For clarity, do not use this function outside the block layer.
|
||||
+ */
|
||||
+bool qemu_in_main_thread(void);
|
||||
+
|
||||
/**
|
||||
* qemu_mutex_lock_iothread: Lock the main loop mutex.
|
||||
*
|
||||
diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
|
||||
index 9c7393b08c..e748ccd323 100644
|
||||
--- a/stubs/Makefile.objs
|
||||
+++ b/stubs/Makefile.objs
|
||||
@@ -11,6 +11,7 @@ stub-obj-y += gdbstub.o
|
||||
stub-obj-y += get-vm-name.o
|
||||
stub-obj-y += iothread.o
|
||||
stub-obj-y += iothread-lock.o
|
||||
+stub-obj-y += iothread-lock-block.o
|
||||
stub-obj-y += is-daemonized.o
|
||||
stub-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
|
||||
stub-obj-y += machine-init-done.o
|
||||
diff --git a/stubs/iothread-lock-block.c b/stubs/iothread-lock-block.c
|
||||
new file mode 100644
|
||||
index 0000000000..c88ed70462
|
||||
--- /dev/null
|
||||
+++ b/stubs/iothread-lock-block.c
|
||||
@@ -0,0 +1,8 @@
|
||||
+#include "qemu/osdep.h"
|
||||
+#include "qemu/main-loop.h"
|
||||
+
|
||||
+bool qemu_in_main_thread(void)
|
||||
+{
|
||||
+ return qemu_get_current_aio_context() == qemu_get_aio_context();
|
||||
+}
|
||||
+
|
||||
--
|
||||
2.45.1.windows.1
|
||||
|
||||
167
nbd-Add-max-connections-to-nbd-server-start.patch
Normal file
167
nbd-Add-max-connections-to-nbd-server-start.patch
Normal file
@@ -0,0 +1,167 @@
|
||||
From 6a3b58ac04f70089d2a96a874d7213f63d808093 Mon Sep 17 00:00:00 2001
|
||||
From: Kevin Wolf <kwolf@redhat.com>
|
||||
Date: Thu, 24 Sep 2020 17:26:54 +0200
|
||||
Subject: [PATCH 3/8] nbd: Add max-connections to nbd-server-start
|
||||
|
||||
This is a QMP equivalent of qemu-nbd's --shared option, limiting the
|
||||
maximum number of clients that can attach at the same time.
|
||||
|
||||
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
|
||||
Reviewed-by: Max Reitz <mreitz@redhat.com>
|
||||
Reviewed-by: Eric Blake <eblake@redhat.com>
|
||||
Message-Id: <20200924152717.287415-9-kwolf@redhat.com>
|
||||
Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
|
||||
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
|
||||
Signed-off-by: liuxiangdong <liuxiangdong5@huawei.com>
|
||||
---
|
||||
blockdev-nbd.c | 30 ++++++++++++++++++++++++------
|
||||
include/block/nbd.h | 3 ++-
|
||||
monitor/hmp-cmds.c | 2 +-
|
||||
qapi/block.json | 5 ++++-
|
||||
4 files changed, 31 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/blockdev-nbd.c b/blockdev-nbd.c
|
||||
index 0c14f033d2..c73a39fae9 100644
|
||||
--- a/blockdev-nbd.c
|
||||
+++ b/blockdev-nbd.c
|
||||
@@ -24,18 +24,28 @@ typedef struct NBDServerData {
|
||||
QIONetListener *listener;
|
||||
QCryptoTLSCreds *tlscreds;
|
||||
char *tlsauthz;
|
||||
+ uint32_t max_connections;
|
||||
+ uint32_t connections;
|
||||
} NBDServerData;
|
||||
|
||||
static NBDServerData *nbd_server;
|
||||
|
||||
+static void nbd_update_server_watch(NBDServerData *s);
|
||||
+
|
||||
static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
|
||||
{
|
||||
nbd_client_put(client);
|
||||
+ assert(nbd_server->connections > 0);
|
||||
+ nbd_server->connections--;
|
||||
+ nbd_update_server_watch(nbd_server);
|
||||
}
|
||||
|
||||
static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
|
||||
gpointer opaque)
|
||||
{
|
||||
+ nbd_server->connections++;
|
||||
+ nbd_update_server_watch(nbd_server);
|
||||
+
|
||||
qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server");
|
||||
/* TODO - expose handshake timeout as QMP option */
|
||||
nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS,
|
||||
@@ -43,6 +53,14 @@ static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
|
||||
nbd_blockdev_client_closed, NULL);
|
||||
}
|
||||
|
||||
+static void nbd_update_server_watch(NBDServerData *s)
|
||||
+{
|
||||
+ if (!s->max_connections || s->connections < s->max_connections) {
|
||||
+ qio_net_listener_set_client_func(s->listener, nbd_accept, NULL, NULL);
|
||||
+ } else {
|
||||
+ qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL);
|
||||
+ }
|
||||
+}
|
||||
|
||||
static void nbd_server_free(NBDServerData *server)
|
||||
{
|
||||
@@ -91,7 +109,8 @@ static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
|
||||
|
||||
|
||||
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
|
||||
- const char *tls_authz, Error **errp)
|
||||
+ const char *tls_authz, uint32_t max_connections,
|
||||
+ Error **errp)
|
||||
{
|
||||
if (nbd_server) {
|
||||
error_setg(errp, "NBD server already running");
|
||||
@@ -99,6 +118,7 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds,
|
||||
}
|
||||
|
||||
nbd_server = g_new0(NBDServerData, 1);
|
||||
+ nbd_server->max_connections = max_connections;
|
||||
nbd_server->listener = qio_net_listener_new();
|
||||
|
||||
qio_net_listener_set_name(nbd_server->listener,
|
||||
@@ -123,10 +143,7 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds,
|
||||
|
||||
nbd_server->tlsauthz = g_strdup(tls_authz);
|
||||
|
||||
- qio_net_listener_set_client_func(nbd_server->listener,
|
||||
- nbd_accept,
|
||||
- NULL,
|
||||
- NULL);
|
||||
+ nbd_update_server_watch(nbd_server);
|
||||
|
||||
return;
|
||||
|
||||
@@ -138,11 +155,12 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds,
|
||||
void qmp_nbd_server_start(SocketAddressLegacy *addr,
|
||||
bool has_tls_creds, const char *tls_creds,
|
||||
bool has_tls_authz, const char *tls_authz,
|
||||
+ bool has_max_connections, uint32_t max_connections,
|
||||
Error **errp)
|
||||
{
|
||||
SocketAddress *addr_flat = socket_address_flatten(addr);
|
||||
|
||||
- nbd_server_start(addr_flat, tls_creds, tls_authz, errp);
|
||||
+ nbd_server_start(addr_flat, tls_creds, tls_authz, max_connections, errp);
|
||||
qapi_free_SocketAddress(addr_flat);
|
||||
}
|
||||
|
||||
diff --git a/include/block/nbd.h b/include/block/nbd.h
|
||||
index 68667c31c8..d6fd188546 100644
|
||||
--- a/include/block/nbd.h
|
||||
+++ b/include/block/nbd.h
|
||||
@@ -355,7 +355,8 @@ void nbd_client_get(NBDClient *client);
|
||||
void nbd_client_put(NBDClient *client);
|
||||
|
||||
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
|
||||
- const char *tls_authz, Error **errp);
|
||||
+ const char *tls_authz, uint32_t max_connections,
|
||||
+ Error **errp);
|
||||
|
||||
/* nbd_read
|
||||
* Reads @size bytes from @ioc. Returns 0 on success.
|
||||
diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c
|
||||
index 5ca3ebe942..bf468fe8eb 100644
|
||||
--- a/monitor/hmp-cmds.c
|
||||
+++ b/monitor/hmp-cmds.c
|
||||
@@ -2365,7 +2365,7 @@ void hmp_nbd_server_start(Monitor *mon, const QDict *qdict)
|
||||
goto exit;
|
||||
}
|
||||
|
||||
- nbd_server_start(addr, NULL, NULL, &local_err);
|
||||
+ nbd_server_start(addr, NULL, NULL, 0, &local_err);
|
||||
qapi_free_SocketAddress(addr);
|
||||
if (local_err != NULL) {
|
||||
goto exit;
|
||||
diff --git a/qapi/block.json b/qapi/block.json
|
||||
index 145c268bb6..e25a2a75a4 100644
|
||||
--- a/qapi/block.json
|
||||
+++ b/qapi/block.json
|
||||
@@ -230,6 +230,8 @@
|
||||
# is only resolved at time of use, so can be deleted and
|
||||
# recreated on the fly while the NBD server is active.
|
||||
# If missing, it will default to denying access (since 4.0).
|
||||
+# @max-connections: The maximum number of connections to allow at the same
|
||||
+# time, 0 for unlimited. (since 5.2; default: 0)
|
||||
#
|
||||
# Returns: error if the server is already running.
|
||||
#
|
||||
@@ -238,7 +240,8 @@
|
||||
{ 'command': 'nbd-server-start',
|
||||
'data': { 'addr': 'SocketAddressLegacy',
|
||||
'*tls-creds': 'str',
|
||||
- '*tls-authz': 'str'} }
|
||||
+ '*tls-authz': 'str',
|
||||
+ '*max-connections': 'uint32' } }
|
||||
|
||||
##
|
||||
# @nbd-server-add:
|
||||
--
|
||||
2.45.1.windows.1
|
||||
|
||||
49
nbd-Minor-style-and-typo-fixes.patch
Normal file
49
nbd-Minor-style-and-typo-fixes.patch
Normal file
@@ -0,0 +1,49 @@
|
||||
From fc5f3bf2fb7eac6b348fd75407c6fd102f8459da Mon Sep 17 00:00:00 2001
|
||||
From: Eric Blake <eblake@redhat.com>
|
||||
Date: Thu, 1 Aug 2024 16:49:20 -0500
|
||||
Subject: [PATCH 1/8] nbd: Minor style and typo fixes
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
Touch up a comment with the wrong type name, and an over-long line,
|
||||
both noticed while working on upcoming patches.
|
||||
|
||||
Signed-off-by: Eric Blake <eblake@redhat.com>
|
||||
Message-ID: <20240807174943.771624-10-eblake@redhat.com>
|
||||
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
|
||||
---
|
||||
nbd/server.c | 2 +-
|
||||
qemu-nbd.c | 3 ++-
|
||||
2 files changed, 3 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/nbd/server.c b/nbd/server.c
|
||||
index 2d81248967..b617e6a1a1 100644
|
||||
--- a/nbd/server.c
|
||||
+++ b/nbd/server.c
|
||||
@@ -1583,7 +1583,7 @@ void nbd_export_close(NBDExport *exp)
|
||||
|
||||
nbd_export_get(exp);
|
||||
/*
|
||||
- * TODO: Should we expand QMP NbdServerRemoveNode enum to allow a
|
||||
+ * TODO: Should we expand QMP BlockExportRemoveMode enum to allow a
|
||||
* close mode that stops advertising the export to new clients but
|
||||
* still permits existing clients to run to completion? Because of
|
||||
* that possibility, nbd_export_close() can be called more than
|
||||
diff --git a/qemu-nbd.c b/qemu-nbd.c
|
||||
index a8cb39e510..2dc2e8ea55 100644
|
||||
--- a/qemu-nbd.c
|
||||
+++ b/qemu-nbd.c
|
||||
@@ -657,7 +657,8 @@ int main(int argc, char **argv)
|
||||
pthread_t client_thread;
|
||||
const char *fmt = NULL;
|
||||
Error *local_err = NULL;
|
||||
- BlockdevDetectZeroesOptions detect_zeroes = BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
|
||||
+ BlockdevDetectZeroesOptions detect_zeroes =
|
||||
+ BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
|
||||
QDict *options = NULL;
|
||||
const char *export_name = NULL; /* defaults to "" later for server mode */
|
||||
const char *export_description = NULL;
|
||||
--
|
||||
2.45.1.windows.1
|
||||
|
||||
139
nbd-server-CVE-2024-7409-Cap-default-max-connections.patch
Normal file
139
nbd-server-CVE-2024-7409-Cap-default-max-connections.patch
Normal file
@@ -0,0 +1,139 @@
|
||||
From 6b5fc2e0040eca99684531565740236b3bb999eb Mon Sep 17 00:00:00 2001
|
||||
From: Eric Blake <eblake@redhat.com>
|
||||
Date: Tue, 6 Aug 2024 13:53:00 -0500
|
||||
Subject: [PATCH 4/8] nbd/server: CVE-2024-7409: Cap default max-connections to
|
||||
100
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
Allowing an unlimited number of clients to any web service is a recipe
|
||||
for a rudimentary denial of service attack: the client merely needs to
|
||||
open lots of sockets without closing them, until qemu no longer has
|
||||
any more fds available to allocate.
|
||||
|
||||
For qemu-nbd, we default to allowing only 1 connection unless more are
|
||||
explicitly asked for (-e or --shared); this was historically picked as
|
||||
a nice default (without an explicit -t, a non-persistent qemu-nbd goes
|
||||
away after a client disconnects, without needing any additional
|
||||
follow-up commands), and we are not going to change that interface now
|
||||
(besides, someday we want to point people towards qemu-storage-daemon
|
||||
instead of qemu-nbd).
|
||||
|
||||
But for qemu proper, and the newer qemu-storage-daemon, the QMP
|
||||
nbd-server-start command has historically had a default of unlimited
|
||||
number of connections, in part because unlike qemu-nbd it is
|
||||
inherently persistent until nbd-server-stop. Allowing multiple client
|
||||
sockets is particularly useful for clients that can take advantage of
|
||||
MULTI_CONN (creating parallel sockets to increase throughput),
|
||||
although known clients that do so (such as libnbd's nbdcopy) typically
|
||||
use only 8 or 16 connections (the benefits of scaling diminish once
|
||||
more sockets are competing for kernel attention). Picking a number
|
||||
large enough for typical use cases, but not unlimited, makes it
|
||||
slightly harder for a malicious client to perform a denial of service
|
||||
merely by opening lots of connections without progressing through the
|
||||
handshake.
|
||||
|
||||
This change does not eliminate CVE-2024-7409 on its own, but reduces
|
||||
the chance for fd exhaustion or unlimited memory usage as an attack
|
||||
surface. On the other hand, by itself, it makes it more obvious that
|
||||
with a finite limit, we have the problem of an unauthenticated client
|
||||
holding 100 fds opened as a way to block out a legitimate client from
|
||||
being able to connect; thus, later patches will further add timeouts
|
||||
to reject clients that are not making progress.
|
||||
|
||||
This is an INTENTIONAL change in behavior, and will break any client
|
||||
of nbd-server-start that was not passing an explicit max-connections
|
||||
parameter, yet expects more than 100 simultaneous connections. We are
|
||||
not aware of any such client (as stated above, most clients aware of
|
||||
MULTI_CONN get by just fine on 8 or 16 connections, and probably cope
|
||||
with later connections failing by relying on the earlier connections;
|
||||
libvirt has not yet been passing max-connections, but generally
|
||||
creates NBD servers with the intent for a single client for the sake
|
||||
of live storage migration; meanwhile, the KubeSAN project anticipates
|
||||
a large cluster sharing multiple clients [up to 8 per node, and up to
|
||||
100 nodes in a cluster], but it currently uses qemu-nbd with an
|
||||
explicit --shared=0 rather than qemu-storage-daemon with
|
||||
nbd-server-start).
|
||||
|
||||
We considered using a deprecation period (declare that omitting
|
||||
max-connections is deprecated, and make it mandatory in 3 releases -
|
||||
then we don't need to pick an arbitrary default); that has zero risk
|
||||
of breaking any apps that accidentally depended on more than 100
|
||||
connections, and where such breakage might not be noticed under unit
|
||||
testing but only under the larger loads of production usage. But it
|
||||
does not close the denial-of-service hole until far into the future,
|
||||
and requires all apps to change to add the parameter even if 100 was
|
||||
good enough. It also has a drawback that any app (like libvirt) that
|
||||
is accidentally relying on an unlimited default should seriously
|
||||
consider their own CVE now, at which point they are going to change to
|
||||
pass explicit max-connections sooner than waiting for 3 qemu releases.
|
||||
Finally, if our changed default breaks an app, that app can always
|
||||
pass in an explicit max-connections with a larger value.
|
||||
|
||||
It is also intentional that the HMP interface to nbd-server-start is
|
||||
not changed to expose max-connections (any client needing to fine-tune
|
||||
things should be using QMP).
|
||||
|
||||
Suggested-by: Daniel P. Berrangé <berrange@redhat.com>
|
||||
Signed-off-by: Eric Blake <eblake@redhat.com>
|
||||
Message-ID: <20240807174943.771624-12-eblake@redhat.com>
|
||||
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
|
||||
[ericb: Expand commit message to summarize Dan's argument for why we
|
||||
break corner-case back-compat behavior without a deprecation period]
|
||||
Signed-off-by: Eric Blake <eblake@redhat.com>
|
||||
---
|
||||
blockdev-nbd.c | 4 ++++
|
||||
include/block/nbd.h | 7 +++++++
|
||||
qapi/block.json | 2 +-
|
||||
3 files changed, 12 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/blockdev-nbd.c b/blockdev-nbd.c
|
||||
index c73a39fae9..67bb3fe4ce 100644
|
||||
--- a/blockdev-nbd.c
|
||||
+++ b/blockdev-nbd.c
|
||||
@@ -160,6 +160,10 @@ void qmp_nbd_server_start(SocketAddressLegacy *addr,
|
||||
{
|
||||
SocketAddress *addr_flat = socket_address_flatten(addr);
|
||||
|
||||
+ if (!has_max_connections) {
|
||||
+ max_connections = NBD_DEFAULT_MAX_CONNECTIONS;
|
||||
+ }
|
||||
+
|
||||
nbd_server_start(addr_flat, tls_creds, tls_authz, max_connections, errp);
|
||||
qapi_free_SocketAddress(addr_flat);
|
||||
}
|
||||
diff --git a/include/block/nbd.h b/include/block/nbd.h
|
||||
index d6fd188546..d768ccb592 100644
|
||||
--- a/include/block/nbd.h
|
||||
+++ b/include/block/nbd.h
|
||||
@@ -31,6 +31,13 @@
|
||||
*/
|
||||
#define NBD_DEFAULT_HANDSHAKE_MAX_SECS 10
|
||||
|
||||
+/*
|
||||
+ * NBD_DEFAULT_MAX_CONNECTIONS: Number of client sockets to allow at
|
||||
+ * once; must be large enough to allow a MULTI_CONN-aware client like
|
||||
+ * nbdcopy to create its typical number of 8-16 sockets.
|
||||
+ */
|
||||
+#define NBD_DEFAULT_MAX_CONNECTIONS 100
|
||||
+
|
||||
/* Handshake phase structs - this struct is passed on the wire */
|
||||
|
||||
struct NBDOption {
|
||||
diff --git a/qapi/block.json b/qapi/block.json
|
||||
index e25a2a75a4..9e1356409b 100644
|
||||
--- a/qapi/block.json
|
||||
+++ b/qapi/block.json
|
||||
@@ -231,7 +231,7 @@
|
||||
# recreated on the fly while the NBD server is active.
|
||||
# If missing, it will default to denying access (since 4.0).
|
||||
# @max-connections: The maximum number of connections to allow at the same
|
||||
-# time, 0 for unlimited. (since 5.2; default: 0)
|
||||
+# time, 0 for unlimited. (since 5.2; default: 100)
|
||||
#
|
||||
# Returns: error if the server is already running.
|
||||
#
|
||||
--
|
||||
2.45.1.windows.1
|
||||
|
||||
163
nbd-server-CVE-2024-7409-Close-stray-clients-at-serv.patch
Normal file
163
nbd-server-CVE-2024-7409-Close-stray-clients-at-serv.patch
Normal file
@@ -0,0 +1,163 @@
|
||||
From 9178d429f77b14343fa9489949a0e8126e34890f Mon Sep 17 00:00:00 2001
|
||||
From: Eric Blake <eblake@redhat.com>
|
||||
Date: Wed, 7 Aug 2024 12:23:13 -0500
|
||||
Subject: [PATCH 8/8] nbd/server: CVE-2024-7409: Close stray clients at
|
||||
server-stop
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
A malicious client can attempt to connect to an NBD server, and then
|
||||
intentionally delay progress in the handshake, including if it does
|
||||
not know the TLS secrets. Although the previous two patches reduce
|
||||
this behavior by capping the default max-connections parameter and
|
||||
killing slow clients, they did not eliminate the possibility of a
|
||||
client waiting to close the socket until after the QMP nbd-server-stop
|
||||
command is executed, at which point qemu would SEGV when trying to
|
||||
dereference the NULL nbd_server global which is no longer present.
|
||||
This amounts to a denial of service attack. Worse, if another NBD
|
||||
server is started before the malicious client disconnects, I cannot
|
||||
rule out additional adverse effects when the old client interferes
|
||||
with the connection count of the new server (although the most likely
|
||||
is a crash due to an assertion failure when checking
|
||||
nbd_server->connections > 0).
|
||||
|
||||
For environments without this patch, the CVE can be mitigated by
|
||||
ensuring (such as via a firewall) that only trusted clients can
|
||||
connect to an NBD server. Note that using frameworks like libvirt
|
||||
that ensure that TLS is used and that nbd-server-stop is not executed
|
||||
while any trusted clients are still connected will only help if there
|
||||
is also no possibility for an untrusted client to open a connection
|
||||
but then stall on the NBD handshake.
|
||||
|
||||
Given the previous patches, it would be possible to guarantee that no
|
||||
clients remain connected by having nbd-server-stop sleep for longer
|
||||
than the default handshake deadline before finally freeing the global
|
||||
nbd_server object, but that could make QMP non-responsive for a long
|
||||
time. So instead, this patch fixes the problem by tracking all client
|
||||
sockets opened while the server is running, and forcefully closing any
|
||||
such sockets remaining without a completed handshake at the time of
|
||||
nbd-server-stop, then waiting until the coroutines servicing those
|
||||
sockets notice the state change. nbd-server-stop now has a second
|
||||
AIO_WAIT_WHILE_UNLOCKED (the first is indirectly through the
|
||||
blk_exp_close_all_type() that disconnects all clients that completed
|
||||
handshakes), but forced socket shutdown is enough to progress the
|
||||
coroutines and quickly tear down all clients before the server is
|
||||
freed, thus finally fixing the CVE.
|
||||
|
||||
This patch relies heavily on the fact that nbd/server.c guarantees
|
||||
that it only calls nbd_blockdev_client_closed() from the main loop
|
||||
(see the assertion in nbd_client_put() and the hoops used in
|
||||
nbd_client_put_nonzero() to achieve that); if we did not have that
|
||||
guarantee, we would also need a mutex protecting our accesses of the
|
||||
list of connections to survive re-entrancy from independent iothreads.
|
||||
|
||||
Although I did not actually try to test old builds, it looks like this
|
||||
problem has existed since at least commit 862172f45c (v2.12.0, 2017) -
|
||||
even back when that patch started using a QIONetListener to handle
|
||||
listening on multiple sockets, nbd_server_free() was already unaware
|
||||
that the nbd_blockdev_client_closed callback can be reached later by a
|
||||
client thread that has not completed handshakes (and therefore the
|
||||
client's socket never got added to the list closed in
|
||||
nbd_export_close_all), despite that patch intentionally tearing down
|
||||
the QIONetListener to prevent new clients.
|
||||
|
||||
Reported-by: Alexander Ivanov <alexander.ivanov@virtuozzo.com>
|
||||
Fixes: CVE-2024-7409
|
||||
CC: qemu-stable@nongnu.org
|
||||
Signed-off-by: Eric Blake <eblake@redhat.com>
|
||||
Message-ID: <20240807174943.771624-14-eblake@redhat.com>
|
||||
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
|
||||
---
|
||||
blockdev-nbd.c | 35 ++++++++++++++++++++++++++++++++++-
|
||||
1 file changed, 34 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/blockdev-nbd.c b/blockdev-nbd.c
|
||||
index 67bb3fe4ce..09ad0bdffb 100644
|
||||
--- a/blockdev-nbd.c
|
||||
+++ b/blockdev-nbd.c
|
||||
@@ -20,12 +20,18 @@
|
||||
#include "io/channel-socket.h"
|
||||
#include "io/net-listener.h"
|
||||
|
||||
+typedef struct NBDConn {
|
||||
+ QIOChannelSocket *cioc;
|
||||
+ QLIST_ENTRY(NBDConn) next;
|
||||
+} NBDConn;
|
||||
+
|
||||
typedef struct NBDServerData {
|
||||
QIONetListener *listener;
|
||||
QCryptoTLSCreds *tlscreds;
|
||||
char *tlsauthz;
|
||||
uint32_t max_connections;
|
||||
uint32_t connections;
|
||||
+ QLIST_HEAD(, NBDConn) conns;
|
||||
} NBDServerData;
|
||||
|
||||
static NBDServerData *nbd_server;
|
||||
@@ -34,6 +40,14 @@ static void nbd_update_server_watch(NBDServerData *s);
|
||||
|
||||
static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
|
||||
{
|
||||
+ NBDConn *conn = nbd_client_owner(client);
|
||||
+
|
||||
+ assert(qemu_in_main_thread() && nbd_server);
|
||||
+
|
||||
+ object_unref(OBJECT(conn->cioc));
|
||||
+ QLIST_REMOVE(conn, next);
|
||||
+ g_free(conn);
|
||||
+
|
||||
nbd_client_put(client);
|
||||
assert(nbd_server->connections > 0);
|
||||
nbd_server->connections--;
|
||||
@@ -43,14 +57,20 @@ static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
|
||||
static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
|
||||
gpointer opaque)
|
||||
{
|
||||
+ NBDConn *conn = g_new0(NBDConn, 1);
|
||||
+
|
||||
+ assert(qemu_in_main_thread() && nbd_server);
|
||||
nbd_server->connections++;
|
||||
+ object_ref(OBJECT(cioc));
|
||||
+ conn->cioc = cioc;
|
||||
+ QLIST_INSERT_HEAD(&nbd_server->conns, conn, next);
|
||||
nbd_update_server_watch(nbd_server);
|
||||
|
||||
qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server");
|
||||
/* TODO - expose handshake timeout as QMP option */
|
||||
nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS,
|
||||
nbd_server->tlscreds, nbd_server->tlsauthz,
|
||||
- nbd_blockdev_client_closed, NULL);
|
||||
+ nbd_blockdev_client_closed, conn);
|
||||
}
|
||||
|
||||
static void nbd_update_server_watch(NBDServerData *s)
|
||||
@@ -64,12 +84,25 @@ static void nbd_update_server_watch(NBDServerData *s)
|
||||
|
||||
static void nbd_server_free(NBDServerData *server)
|
||||
{
|
||||
+ NBDConn *conn, *tmp;
|
||||
+
|
||||
if (!server) {
|
||||
return;
|
||||
}
|
||||
|
||||
+ /*
|
||||
+ * Forcefully close the listener socket, and any clients that have
|
||||
+ * not yet disconnected on their own.
|
||||
+ */
|
||||
qio_net_listener_disconnect(server->listener);
|
||||
object_unref(OBJECT(server->listener));
|
||||
+ QLIST_FOREACH_SAFE(conn, &server->conns, next, tmp) {
|
||||
+ qio_channel_shutdown(QIO_CHANNEL(conn->cioc), QIO_CHANNEL_SHUTDOWN_BOTH,
|
||||
+ NULL);
|
||||
+ }
|
||||
+
|
||||
+ AIO_WAIT_WHILE_UNLOCKED(NULL, server->connections > 0);
|
||||
+
|
||||
if (server->tlscreds) {
|
||||
object_unref(OBJECT(server->tlscreds));
|
||||
}
|
||||
--
|
||||
2.45.1.windows.1
|
||||
|
||||
121
nbd-server-CVE-2024-7409-Drop-non-negotiating-client.patch
Normal file
121
nbd-server-CVE-2024-7409-Drop-non-negotiating-client.patch
Normal file
@ -0,0 +1,121 @@
|
||||
From 5b8f6de9445b18b0eac122b536a3fcca77a3ed1f Mon Sep 17 00:00:00 2001
|
||||
From: Eric Blake <eblake@redhat.com>
|
||||
Date: Thu, 8 Aug 2024 16:05:08 -0500
|
||||
Subject: [PATCH 5/8] nbd/server: CVE-2024-7409: Drop non-negotiating clients
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
A client that opens a socket but does not negotiate is merely hogging
|
||||
qemu's resources (an open fd and a small amount of memory); and a
|
||||
malicious client that can access the port where NBD is listening can
|
||||
attempt a denial of service attack by intentionally opening and
|
||||
abandoning lots of unfinished connections. The previous patch put a
|
||||
default bound on the number of such ongoing connections, but once that
|
||||
limit is hit, no more clients can connect (including legitimate ones).
|
||||
The solution is to insist that clients complete handshake within a
|
||||
reasonable time limit, defaulting to 10 seconds. A client that has
|
||||
not successfully completed NBD_OPT_GO by then (including the case of
|
||||
where the client didn't know TLS credentials to even reach the point
|
||||
of NBD_OPT_GO) is wasting our time and does not deserve to stay
|
||||
connected. Later patches will allow fine-tuning the limit away from
|
||||
the default value (including disabling it for doing integration
|
||||
testing of the handshake process itself).
|
||||
|
||||
Note that this patch in isolation actually makes it more likely to see
|
||||
qemu SEGV after nbd-server-stop, as any client socket still connected
|
||||
when the server shuts down will now be closed after 10 seconds rather
|
||||
than at the client's whims. That will be addressed in the next patch.
|
||||
|
||||
For a demo of this patch in action:
|
||||
$ qemu-nbd -f raw -r -t -e 10 file &
|
||||
$ nbdsh --opt-mode -c '
|
||||
H = list()
|
||||
for i in range(20):
|
||||
print(i)
|
||||
H.insert(i, nbd.NBD())
|
||||
H[i].set_opt_mode(True)
|
||||
H[i].connect_uri("nbd://localhost")
|
||||
'
|
||||
$ kill $!
|
||||
|
||||
where later connections get to start progressing once earlier ones are
|
||||
forcefully dropped for taking too long, rather than hanging.
|
||||
|
||||
Suggested-by: Daniel P. Berrangé <berrange@redhat.com>
|
||||
Signed-off-by: Eric Blake <eblake@redhat.com>
|
||||
Message-ID: <20240807174943.771624-13-eblake@redhat.com>
|
||||
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
|
||||
[eblake: rebase to changes earlier in series, reduce scope of timer]
|
||||
Signed-off-by: Eric Blake <eblake@redhat.com>
|
||||
---
|
||||
nbd/server.c | 28 +++++++++++++++++++++++++++-
|
||||
nbd/trace-events | 1 +
|
||||
2 files changed, 28 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/nbd/server.c b/nbd/server.c
|
||||
index 68d78888c6..112b987440 100644
|
||||
--- a/nbd/server.c
|
||||
+++ b/nbd/server.c
|
||||
@@ -2416,22 +2416,48 @@ static void nbd_client_receive_next_request(NBDClient *client)
|
||||
}
|
||||
}
|
||||
|
||||
+static void nbd_handshake_timer_cb(void *opaque)
|
||||
+{
|
||||
+ QIOChannel *ioc = opaque;
|
||||
+
|
||||
+ trace_nbd_handshake_timer_cb();
|
||||
+ qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
|
||||
+}
|
||||
+
|
||||
static coroutine_fn void nbd_co_client_start(void *opaque)
|
||||
{
|
||||
NBDClient *client = opaque;
|
||||
Error *local_err = NULL;
|
||||
+ QEMUTimer *handshake_timer = NULL;
|
||||
|
||||
qemu_co_mutex_init(&client->send_lock);
|
||||
|
||||
- /* TODO - utilize client->handshake_max_secs */
|
||||
+ /*
|
||||
+ * Create a timer to bound the time spent in negotiation. If the
|
||||
+ * timer expires, it is likely nbd_negotiate will fail because the
|
||||
+ * socket was shutdown.
|
||||
+ */
|
||||
+ if (client->handshake_max_secs > 0) {
|
||||
+ handshake_timer = aio_timer_new(qemu_get_aio_context(),
|
||||
+ QEMU_CLOCK_REALTIME,
|
||||
+ SCALE_NS,
|
||||
+ nbd_handshake_timer_cb,
|
||||
+ client->sioc);
|
||||
+ timer_mod(handshake_timer,
|
||||
+ qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
|
||||
+ client->handshake_max_secs * NANOSECONDS_PER_SECOND);
|
||||
+ }
|
||||
+
|
||||
if (nbd_negotiate(client, &local_err)) {
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
}
|
||||
+ timer_free(handshake_timer);
|
||||
client_close(client, false);
|
||||
return;
|
||||
}
|
||||
|
||||
+ timer_free(handshake_timer);
|
||||
nbd_client_receive_next_request(client);
|
||||
}
|
||||
|
||||
diff --git a/nbd/trace-events b/nbd/trace-events
|
||||
index 7ab6b3788c..57a859146e 100644
|
||||
--- a/nbd/trace-events
|
||||
+++ b/nbd/trace-events
|
||||
@@ -73,3 +73,4 @@ nbd_co_receive_request_decode_type(uint64_t handle, uint16_t type, const char *n
|
||||
nbd_co_receive_request_payload_received(uint64_t handle, uint32_t len) "Payload received: handle = %" PRIu64 ", len = %" PRIu32
|
||||
nbd_co_receive_align_compliance(const char *op, uint64_t from, uint32_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx32 ", align=0x%" PRIx32
|
||||
nbd_trip(void) "Reading request"
|
||||
+nbd_handshake_timer_cb(void) "client took too long to negotiate"
|
||||
--
|
||||
2.45.1.windows.1
|
||||
|
||||
162
nbd-server-Plumb-in-new-args-to-nbd_client_add.patch
Normal file
162
nbd-server-Plumb-in-new-args-to-nbd_client_add.patch
Normal file
@ -0,0 +1,162 @@
|
||||
From 8343eb0b064fd0d6a270812d43a5f4c6051b6714 Mon Sep 17 00:00:00 2001
|
||||
From: Eric Blake <eblake@redhat.com>
|
||||
Date: Wed, 7 Aug 2024 08:50:01 -0500
|
||||
Subject: [PATCH 2/8] nbd/server: Plumb in new args to nbd_client_add()
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
Upcoming patches to fix a CVE need to track an opaque pointer passed
|
||||
in by the owner of a client object, as well as request for a time
|
||||
limit on how fast negotiation must complete. Prepare for that by
|
||||
changing the signature of nbd_client_new() and adding an accessor to
|
||||
get at the opaque pointer, although for now the two servers
|
||||
(qemu-nbd.c and blockdev-nbd.c) do not change behavior even though
|
||||
they pass in a new default timeout value.
|
||||
|
||||
Suggested-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
|
||||
Signed-off-by: Eric Blake <eblake@redhat.com>
|
||||
Message-ID: <20240807174943.771624-11-eblake@redhat.com>
|
||||
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
|
||||
[eblake: s/LIMIT/MAX_SECS/ as suggested by Dan]
|
||||
Signed-off-by: Eric Blake <eblake@redhat.com>
|
||||
---
|
||||
blockdev-nbd.c | 6 ++++--
|
||||
include/block/nbd.h | 11 ++++++++++-
|
||||
nbd/server.c | 20 +++++++++++++++++---
|
||||
qemu-nbd.c | 4 +++-
|
||||
4 files changed, 34 insertions(+), 7 deletions(-)
|
||||
|
||||
diff --git a/blockdev-nbd.c b/blockdev-nbd.c
|
||||
index 66eebab318..0c14f033d2 100644
|
||||
--- a/blockdev-nbd.c
|
||||
+++ b/blockdev-nbd.c
|
||||
@@ -37,8 +37,10 @@ static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
|
||||
gpointer opaque)
|
||||
{
|
||||
qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server");
|
||||
- nbd_client_new(cioc, nbd_server->tlscreds, nbd_server->tlsauthz,
|
||||
- nbd_blockdev_client_closed);
|
||||
+ /* TODO - expose handshake timeout as QMP option */
|
||||
+ nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS,
|
||||
+ nbd_server->tlscreds, nbd_server->tlsauthz,
|
||||
+ nbd_blockdev_client_closed, NULL);
|
||||
}
|
||||
|
||||
|
||||
diff --git a/include/block/nbd.h b/include/block/nbd.h
|
||||
index bb9f5bc021..68667c31c8 100644
|
||||
--- a/include/block/nbd.h
|
||||
+++ b/include/block/nbd.h
|
||||
@@ -25,6 +25,12 @@
|
||||
#include "crypto/tlscreds.h"
|
||||
#include "qapi/error.h"
|
||||
|
||||
+/*
|
||||
+ * NBD_DEFAULT_HANDSHAKE_MAX_SECS: Number of seconds in which client must
|
||||
+ * succeed at NBD_OPT_GO before being forcefully dropped as too slow.
|
||||
+ */
|
||||
+#define NBD_DEFAULT_HANDSHAKE_MAX_SECS 10
|
||||
+
|
||||
/* Handshake phase structs - this struct is passed on the wire */
|
||||
|
||||
struct NBDOption {
|
||||
@@ -339,9 +345,12 @@ NBDExport *nbd_export_find(const char *name);
|
||||
void nbd_export_close_all(void);
|
||||
|
||||
void nbd_client_new(QIOChannelSocket *sioc,
|
||||
+ uint32_t handshake_max_secs,
|
||||
QCryptoTLSCreds *tlscreds,
|
||||
const char *tlsauthz,
|
||||
- void (*close_fn)(NBDClient *, bool));
|
||||
+ void (*close_fn)(NBDClient *, bool),
|
||||
+ void *owner);
|
||||
+void *nbd_client_owner(NBDClient *client);
|
||||
void nbd_client_get(NBDClient *client);
|
||||
void nbd_client_put(NBDClient *client);
|
||||
|
||||
diff --git a/nbd/server.c b/nbd/server.c
|
||||
index b617e6a1a1..68d78888c6 100644
|
||||
--- a/nbd/server.c
|
||||
+++ b/nbd/server.c
|
||||
@@ -111,10 +111,12 @@ typedef struct NBDExportMetaContexts {
|
||||
struct NBDClient {
|
||||
int refcount;
|
||||
void (*close_fn)(NBDClient *client, bool negotiated);
|
||||
+ void *owner;
|
||||
|
||||
NBDExport *exp;
|
||||
QCryptoTLSCreds *tlscreds;
|
||||
char *tlsauthz;
|
||||
+ uint32_t handshake_max_secs;
|
||||
QIOChannelSocket *sioc; /* The underlying data channel */
|
||||
QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
|
||||
|
||||
@@ -2421,6 +2423,7 @@ static coroutine_fn void nbd_co_client_start(void *opaque)
|
||||
|
||||
qemu_co_mutex_init(&client->send_lock);
|
||||
|
||||
+ /* TODO - utilize client->handshake_max_secs */
|
||||
if (nbd_negotiate(client, &local_err)) {
|
||||
if (local_err) {
|
||||
error_report_err(local_err);
|
||||
@@ -2433,14 +2436,17 @@ static coroutine_fn void nbd_co_client_start(void *opaque)
|
||||
}
|
||||
|
||||
/*
|
||||
- * Create a new client listener using the given channel @sioc.
|
||||
+ * Create a new client listener using the given channel @sioc and @owner.
|
||||
* Begin servicing it in a coroutine. When the connection closes, call
|
||||
- * @close_fn with an indication of whether the client completed negotiation.
|
||||
+ * @close_fn with an indication of whether the client completed negotiation
|
||||
+ * within @handshake_max_secs seconds (0 for unbounded).
|
||||
*/
|
||||
void nbd_client_new(QIOChannelSocket *sioc,
|
||||
+ uint32_t handshake_max_secs,
|
||||
QCryptoTLSCreds *tlscreds,
|
||||
const char *tlsauthz,
|
||||
- void (*close_fn)(NBDClient *, bool))
|
||||
+ void (*close_fn)(NBDClient *, bool),
|
||||
+ void *owner)
|
||||
{
|
||||
NBDClient *client;
|
||||
Coroutine *co;
|
||||
@@ -2452,12 +2458,20 @@ void nbd_client_new(QIOChannelSocket *sioc,
|
||||
object_ref(OBJECT(client->tlscreds));
|
||||
}
|
||||
client->tlsauthz = g_strdup(tlsauthz);
|
||||
+ client->handshake_max_secs = handshake_max_secs;
|
||||
client->sioc = sioc;
|
||||
object_ref(OBJECT(client->sioc));
|
||||
client->ioc = QIO_CHANNEL(sioc);
|
||||
object_ref(OBJECT(client->ioc));
|
||||
client->close_fn = close_fn;
|
||||
+ client->owner = owner;
|
||||
|
||||
co = qemu_coroutine_create(nbd_co_client_start, client);
|
||||
qemu_coroutine_enter(co);
|
||||
}
|
||||
+
|
||||
+void *
|
||||
+nbd_client_owner(NBDClient *client)
|
||||
+{
|
||||
+ return client->owner;
|
||||
+}
|
||||
diff --git a/qemu-nbd.c b/qemu-nbd.c
|
||||
index 2dc2e8ea55..5f3abf53cd 100644
|
||||
--- a/qemu-nbd.c
|
||||
+++ b/qemu-nbd.c
|
||||
@@ -448,7 +448,9 @@ static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
|
||||
|
||||
nb_fds++;
|
||||
nbd_update_server_watch();
|
||||
- nbd_client_new(cioc, tlscreds, tlsauthz, nbd_client_closed);
|
||||
+ /* TODO - expose handshake timeout as command line option */
|
||||
+ nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS,
|
||||
+ tlscreds, tlsauthz, nbd_client_closed, NULL);
|
||||
}
|
||||
|
||||
static void nbd_update_server_watch(void)
|
||||
--
|
||||
2.45.1.windows.1
|
||||
|
||||
20
qemu.spec
20
qemu.spec
@ -1,6 +1,6 @@
|
||||
Name: qemu
|
||||
Version: 4.1.0
|
||||
Release: 85
|
||||
Release: 86
|
||||
Epoch: 10
|
||||
Summary: QEMU is a generic and open source machine emulator and virtualizer
|
||||
License: GPLv2 and BSD and MIT and CC-BY-SA-4.0
|
||||
@ -415,6 +415,14 @@ Patch0402: qcow2-Do-not-reopen-data_file-in-invalidate_cache.patch
|
||||
Patch0403: qcow2-Don-t-open-data_file-with-BDRV_O_NO_IO-CVE-202.patch
|
||||
Patch0404: block-introduce-bdrv_open_file_child-helper.patch
|
||||
Patch0405: block-Parse-filenames-only-when-explicitly-requested.patch
|
||||
Patch0406: nbd-Minor-style-and-typo-fixes.patch
|
||||
Patch0407: nbd-server-Plumb-in-new-args-to-nbd_client_add.patch
|
||||
Patch0408: nbd-Add-max-connections-to-nbd-server-start.patch
|
||||
Patch0409: nbd-server-CVE-2024-7409-Cap-default-max-connections.patch
|
||||
Patch0410: nbd-server-CVE-2024-7409-Drop-non-negotiating-client.patch
|
||||
Patch0411: aio-wait.h-introduce-AIO_WAIT_WHILE_UNLOCKED.patch
|
||||
Patch0412: main-loop.h-introduce-qemu_in_main_thread.patch
|
||||
Patch0413: nbd-server-CVE-2024-7409-Close-stray-clients-at-serv.patch
|
||||
|
||||
BuildRequires: flex
|
||||
BuildRequires: bison
|
||||
@ -815,6 +823,16 @@ getent passwd qemu >/dev/null || \
|
||||
%endif
|
||||
|
||||
%changelog
|
||||
* Tue Aug 13 2024 Jiabo Feng <fengjiabo1@huawei.com> - 10:4.1.0-86
|
||||
- nbd/server: CVE-2024-7409: Close stray clients at server-stop
|
||||
- main-loop.h: introduce qemu_in_main_thread()
|
||||
- aio-wait.h: introduce AIO_WAIT_WHILE_UNLOCKED
|
||||
- nbd/server: CVE-2024-7409: Drop non-negotiating clients
|
||||
- nbd/server: CVE-2024-7409: Cap default max-connections to 100
|
||||
- nbd: Add max-connections to nbd-server-start
|
||||
- nbd/server: Plumb in new args to nbd_client_add()
|
||||
- nbd: Minor style and typo fixes
|
||||
|
||||
* Thu Jul 11 2024 Jiabo Feng <fengjiabo1@huawei.com>
|
||||
- block: Parse filenames only when explicitly requested (CVE-2024-4467)
|
||||
- block: introduce bdrv_open_file_child() helper
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user