!214 [sync] PR-210: fix CVE-2022-2132

From: @openeuler-sync-bot 
Reviewed-by: @jiangheng12 
Signed-off-by: @jiangheng12
openeuler-ci-bot 2022-09-09 01:12:21 +00:00 committed by Gitee
commit 99d9187a99
4 changed files with 347 additions and 1 deletion


@@ -0,0 +1,73 @@
From 5b3c25e6ee2c68887aae166aed57d0b4af91fa60 Mon Sep 17 00:00:00 2001
From: Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Thu, 16 Jun 2022 11:35:56 +0200
Subject: [PATCH] vhost: discard too small descriptor chains

This patch discards descriptor chains which are smaller
than the Virtio-net header size, as well as chains whose
size is exactly equal to it. Such descriptor chain sizes
mean there is no packet data.

This patch also has the advantage of requesting the exact
packet sizes for the mbufs.
CVE-2022-2132
Fixes: 62250c1d0978 ("vhost: extract split ring handling from Rx and Tx functions")
Fixes: c3ff0ac70acb ("vhost: improve performance by supporting large buffer")
Cc: stable@dpdk.org
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/librte_vhost/virtio_net.c | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ae4e54a442..1fcbc1aca9 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1412,10 +1412,10 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
- if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
- error = -1;
- goto out;
- }
+ /*
+ * The caller has checked the descriptors chain is larger than the
+ * header size.
+ */
if (virtio_net_with_host_offload(dev)) {
if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
@@ -1742,6 +1742,14 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (likely(dev->dequeue_zero_copy == 0))
update_shadow_used_ring_split(vq, head_idx, 0);
+ if (unlikely(buf_len <= dev->vhost_hlen)) {
+ dropped += 1;
+ i++;
+ break;
+ }
+
+ buf_len -= dev->vhost_hlen;
+
pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
if (unlikely(pkts[i] == NULL)) {
/*
@@ -1955,6 +1963,11 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
VHOST_ACCESS_RO) < 0))
return -1;
+ if (unlikely(buf_len <= dev->vhost_hlen))
+ return -1;
+
+ buf_len -= dev->vhost_hlen;
+
*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
if (unlikely(*pkts == NULL)) {
if (!allocerr_warned) {
--
2.23.0
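
In short, both the split and packed dequeue paths now apply the same guard before the mbuf is allocated: chains no larger than the Virtio-net header are dropped, and the remainder is used as the allocation size. A minimal standalone sketch of that check, using hypothetical names (chain_holds_payload, total_chain_len, vhost_hdr_len) rather than the actual librte_vhost code:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative helper, not part of the patch: decide whether a
 * descriptor chain carries any packet data beyond the virtio-net
 * header, and if so report the exact payload size for the mbuf.
 */
static bool
chain_holds_payload(uint32_t total_chain_len, uint32_t vhost_hdr_len,
                    uint32_t *payload_len)
{
    /* A chain smaller than or equal to the header holds no data. */
    if (total_chain_len <= vhost_hdr_len)
        return false;

    *payload_len = total_chain_len - vhost_hdr_len;
    return true;
}

Sizing the mbuf to the remaining length only is what the commit message refers to as requesting the exact packet sizes.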


@@ -0,0 +1,103 @@
From e73049ea26a588518bde750f46ac700462a598ed Mon Sep 17 00:00:00 2001
From: Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Thu, 16 Jun 2022 14:25:07 +0200
Subject: [PATCH] vhost: fix header spanned across more than two descriptors

This patch aims at supporting the unlikely case where a
Virtio-net header spans more than two descriptors.
CVE-2022-2132
Fixes: fd68b4739d2c ("vhost: use buffer vectors in dequeue path")
Cc: stable@dpdk.org
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/librte_vhost/virtio_net.c | 45 +++++++++++------------------------
1 file changed, 14 insertions(+), 31 deletions(-)
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 1fcbc1aca9..af735f9c2b 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1399,26 +1399,22 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t buf_avail, buf_offset;
uint64_t buf_addr, buf_iova, buf_len;
uint32_t mbuf_avail, mbuf_offset;
+ uint32_t hdr_remain = dev->vhost_hlen;
uint32_t cpy_len;
struct rte_mbuf *cur = m, *prev = m;
struct virtio_net_hdr tmp_hdr;
struct virtio_net_hdr *hdr = NULL;
- /* A counter to avoid desc dead loop chain */
- uint16_t vec_idx = 0;
+ uint16_t vec_idx;
struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
int error = 0;
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
-
/*
* The caller has checked the descriptors chain is larger than the
* header size.
*/
if (virtio_net_with_host_offload(dev)) {
- if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
+ if (unlikely(buf_vec[0].buf_len < sizeof(struct virtio_net_hdr))) {
/*
* No luck, the virtio-net header doesn't fit
* in a contiguous virtual area.
@@ -1426,36 +1422,23 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
hdr = &tmp_hdr;
} else {
- hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
+ hdr = (struct virtio_net_hdr *)((uintptr_t)buf_vec[0].buf_addr);
}
}
- /*
- * A virtio driver normally uses at least 2 desc buffers
- * for Tx: the first for storing the header, and others
- * for storing the data.
- */
- if (unlikely(buf_len < dev->vhost_hlen)) {
- buf_offset = dev->vhost_hlen - buf_len;
- vec_idx++;
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
- buf_avail = buf_len - buf_offset;
- } else if (buf_len == dev->vhost_hlen) {
- if (unlikely(++vec_idx >= nr_vec))
- goto out;
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
+ for (vec_idx = 0; vec_idx < nr_vec; vec_idx++) {
+ if (buf_vec[vec_idx].buf_len > hdr_remain)
+ break;
- buf_offset = 0;
- buf_avail = buf_len;
- } else {
- buf_offset = dev->vhost_hlen;
- buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
+ hdr_remain -= buf_vec[vec_idx].buf_len;
}
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+ buf_offset = hdr_remain;
+ buf_avail = buf_vec[vec_idx].buf_len - hdr_remain;
+
PRINT_PACKET(dev,
(uintptr_t)(buf_addr + buf_offset),
(uint32_t)buf_avail, 0);
--
2.23.0
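
In short, the removed special-casing (header ending exactly on the first or second descriptor) is replaced by a generic walk over the buffer vector that works for a header spanning any number of descriptors. A simplified sketch of that walk, assuming a hypothetical struct buf_entry and helper name rather than the real vhost types:

#include <stdint.h>

/* Hypothetical stand-in for the vhost buffer vector entries. */
struct buf_entry {
    uint64_t addr;
    uint32_t len;
};

/*
 * Advance past the virtio-net header: subtract whole buffers from the
 * remaining header length until one buffer is strictly larger than
 * what is left; the payload starts hdr_remain bytes into that buffer.
 * Returns the payload buffer index, or -1 if the header consumes the
 * whole chain.
 */
static int
skip_vnet_header(const struct buf_entry *vec, uint16_t nr_vec,
                 uint32_t hdr_len, uint32_t *payload_offset)
{
    uint32_t hdr_remain = hdr_len;
    uint16_t idx;

    for (idx = 0; idx < nr_vec; idx++) {
        if (vec[idx].len > hdr_remain) {
            *payload_offset = hdr_remain;
            return idx;
        }
        hdr_remain -= vec[idx].len;
    }
    return -1;
}

The patched copy_desc_to_mbuf() can omit the exhausted-chain case because, after the previous patch, its callers only pass in chains larger than the header.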


@@ -0,0 +1,163 @@
From 0fd5608ef97f9c467f1ecc926463cf793189443e Mon Sep 17 00:00:00 2001
From: Sivaprasad Tummala <sivaprasad.tummala@intel.com>
Date: Fri, 8 May 2020 16:47:51 +0530
Subject: [PATCH] vhost: handle mbuf allocation failure

Vhost buffer allocation succeeds for packets that fit into
a linear buffer. If it fails, the vhost library is expected
to drop the current packet and skip to the next one.

This patch fixes the error scenario by skipping to the next
packet.

Note: drop counters are not currently supported.
Fixes: c3ff0ac70acb ("vhost: improve performance by supporting large buffer")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/librte_vhost/virtio_net.c | 70 +++++++++++++++++++++++++++--------
1 file changed, 55 insertions(+), 15 deletions(-)
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 5e8c6b99c0..751c1f3733 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1673,6 +1673,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
{
uint16_t i;
uint16_t free_entries;
+ uint16_t dropped = 0;
+ static bool allocerr_warned;
if (unlikely(dev->dequeue_zero_copy)) {
struct zcopy_mbuf *zmbuf, *next;
@@ -1734,13 +1736,35 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
update_shadow_used_ring_split(vq, head_idx, 0);
pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
- if (unlikely(pkts[i] == NULL))
+ if (unlikely(pkts[i] == NULL)) {
+ /*
+ * mbuf allocation fails for jumbo packets when external
+ * buffer allocation is not allowed and linear buffer
+ * is required. Drop this packet.
+ */
+ if (!allocerr_warned) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "Failed mbuf alloc of size %d from %s on %s.\n",
+ buf_len, mbuf_pool->name, dev->ifname);
+ allocerr_warned = true;
+ }
+ dropped += 1;
+ i++;
break;
+ }
err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
mbuf_pool);
if (unlikely(err)) {
rte_pktmbuf_free(pkts[i]);
+ if (!allocerr_warned) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "Failed to copy desc to mbuf on %s.\n",
+ dev->ifname);
+ allocerr_warned = true;
+ }
+ dropped += 1;
+ i++;
break;
}
@@ -1750,6 +1774,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
zmbuf = get_zmbuf(vq);
if (!zmbuf) {
rte_pktmbuf_free(pkts[i]);
+ dropped += 1;
+ i++;
break;
}
zmbuf->mbuf = pkts[i];
@@ -1779,7 +1805,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
}
- return i;
+ return (i - dropped);
}
static __rte_always_inline int
@@ -1913,6 +1939,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
uint32_t buf_len;
uint16_t nr_vec = 0;
int err;
+ static bool allocerr_warned;
if (unlikely(fill_vec_buf_packed(dev, vq,
vq->last_avail_idx, desc_count,
@@ -1923,14 +1950,24 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
if (unlikely(*pkts == NULL)) {
- RTE_LOG(ERR, VHOST_DATA,
- "Failed to allocate memory for mbuf.\n");
+ if (!allocerr_warned) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "Failed mbuf alloc of size %d from %s on %s.\n",
+ buf_len, mbuf_pool->name, dev->ifname);
+ allocerr_warned = true;
+ }
return -1;
}
err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
mbuf_pool);
if (unlikely(err)) {
+ if (!allocerr_warned) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "Failed to copy desc to mbuf on %s.\n",
+ dev->ifname);
+ allocerr_warned = true;
+ }
rte_pktmbuf_free(*pkts);
return -1;
}
@@ -1945,21 +1982,24 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
struct rte_mbuf **pkts)
{
- uint16_t buf_id, desc_count;
+ uint16_t buf_id, desc_count = 0;
+ int ret;
- if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
- &desc_count))
- return -1;
+ ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+ &desc_count);
- if (virtio_net_is_inorder(dev))
- vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
- desc_count);
- else
- vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
+ if (likely(desc_count > 0)) {
+ if (virtio_net_is_inorder(dev))
+ vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
+ desc_count);
+ else
+ vhost_shadow_dequeue_single_packed(vq, buf_id,
+ desc_count);
- vq_inc_last_avail_packed(vq, desc_count);
+ vq_inc_last_avail_packed(vq, desc_count);
+ }
- return 0;
+ return ret;
}
static __rte_always_inline int
--
2.23.0
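
In short, the dequeue paths now count failed slots as dropped, warn only once via a static flag instead of flooding the log, and report to the caller only the packets that were actually filled. A minimal sketch of that accounting pattern, with hypothetical dequeue_burst() and build_pkt() names standing in for the real split and packed paths:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for mbuf allocation plus descriptor copy. */
static int
build_pkt(uint16_t slot)
{
    (void)slot;
    return 0; /* pretend the allocation and copy succeeded */
}

static uint16_t
dequeue_burst(uint16_t count)
{
    static bool allocerr_warned;
    uint16_t i, dropped = 0;

    for (i = 0; i < count; i++) {
        if (build_pkt(i) != 0) {
            /* Warn once, count the drop, and stop the burst. */
            if (!allocerr_warned) {
                fprintf(stderr, "mbuf alloc or copy failed\n");
                allocerr_warned = true;
            }
            dropped += 1;
            i++; /* the failed slot's descriptors are still consumed */
            break;
        }
    }

    /* Only packets actually handed to the caller are reported. */
    return i - dropped;
}

In the actual patch the failed slot has already been pushed to the shadow used ring, which is why i is still advanced before the loop breaks.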


@@ -1,6 +1,6 @@
Name: dpdk
Version: 19.11
- Release: 18
+ Release: 19
Packager: packaging@6wind.com
URL: http://dpdk.org
%global source_version 19.11
@@ -44,6 +44,10 @@ Patch34: 0018-fix-error-that-the-secondary-attach-fails-due-to-detach.patch
Patch35: CVE-2021-3839.patch
Patch36: CVE-2022-0669.patch
+ Patch6000: backport-vhost-handle-mbuf-allocation-failure.patch
+ Patch6001: backport-0001-CVE-2022-2132.patch
+ Patch6002: backport-0002-CVE-2022-2131.patch
Summary: Data Plane Development Kit core
Group: System Environment/Libraries
License: BSD and LGPLv2 and GPLv2
@@ -195,6 +199,9 @@ strip -g $RPM_BUILD_ROOT/lib/modules/${namer}/extra/dpdk/rte_kni.ko
/usr/sbin/depmod
%changelog
+ * Thu Sep 8 2022 jiangheng <jiangheng14@huawei.com> - 19.11-19
+ - fix CVE-2022-2132
* Tue Aug 9 2022 wuchangsheng <wuchangsheng2@huawei.com> - 19.11-18
- enable mlx4 mlx5 pmd driver
- mv so lib in main package from devel-package