From 8b090f2664e9d014cd8fa0fde90597aaf4349e7e Mon Sep 17 00:00:00 2001
From: Alexander Kozyrev <akozyrev@nvidia.com>
Date: Thu, 11 Aug 2022 21:19:36 +0300
Subject: [PATCH] net/mlx5: fix Rx queue recovery mechanism

The local variables are getting inconsistent in data receiving routines
after queue error recovery.
Receive queue consumer index is getting wrong, need to reset one to the
size of the queue (as RQ was fully replenished in recovery procedure).

In MPRQ case, also the local consumed strd variable should be reset.

CVE-2022-28199
Fixes: 88c0733 ("net/mlx5: extend Rx completion with error handling")

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
Signed-off-by: Matan Azrad <matan@nvidia.com>
Conflict: NA
Reference: https://git.dpdk.org/dpdk-stable/commit/?id=8b090f2664
---
 drivers/net/mlx5/mlx5_rxtx.c | 34 ++++++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index a4f627e1a6..07c994815a 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -931,6 +931,11 @@ mlx5_queue_state_modify(struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* Must be negative. */
+#define MLX5_ERROR_CQE_RET (-1)
+/* Must not be negative. */
+#define MLX5_RECOVERY_ERROR_RET 0
+
 /**
  * Handle a Rx error.
  * The function inserts the RQ state to reset when the first error CQE is
@@ -945,7 +950,7 @@ mlx5_queue_state_modify(struct rte_eth_dev *dev,
  *   0 when called from non-vectorized Rx burst.
  *
  * @return
- *   -1 in case of recovery error, otherwise the CQE status.
+ *   MLX5_RECOVERY_ERROR_RET in case of recovery error, otherwise the CQE status.
  */
 int
 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
@@ -973,7 +978,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 		sm.queue_id = rxq->idx;
 		sm.state = IBV_WQS_RESET;
 		if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
-			return -1;
+			return MLX5_RECOVERY_ERROR_RET;
 		if (rxq_ctrl->dump_file_n <
 		    rxq_ctrl->priv->config.max_dump_files_num) {
 			MKSTR(err_str, "Unexpected CQE error syndrome "
@@ -1014,7 +1019,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 			sm.state = IBV_WQS_RDY;
 			if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
 						    &sm))
-				return -1;
+				return MLX5_RECOVERY_ERROR_RET;
 			if (vec) {
 				const uint16_t q_mask = wqe_n - 1;
 				uint16_t elt_idx;
@@ -1036,7 +1041,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 							rte_pktmbuf_free_seg
 								(*elt);
 					}
-					return -1;
+					return MLX5_RECOVERY_ERROR_RET;
 				}
 			}
 			for (i = 0; i < (int)wqe_n; ++i) {
@@ -1055,7 +1060,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 		}
 		return ret;
 	default:
-		return -1;
+		return MLX5_RECOVERY_ERROR_RET;
 	}
 }
 
@@ -1073,7 +1078,9 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 *   written.
 *
 * @return
- *   0 in case of empty CQE, otherwise the packet size in bytes.
+ *   0 in case of empty CQE, MLX5_ERROR_CQE_RET in case of error CQE,
+ *   otherwise the packet size in regular RxQ, and striding byte
+ *   count format in mprq case.
 */
 static inline int
 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -1140,8 +1147,8 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 				     rxq->err_state)) {
 				ret = mlx5_rx_err_handle(rxq, 0);
 				if (ret == MLX5_CQE_STATUS_HW_OWN ||
-				    ret == -1)
-					return 0;
+				    ret == MLX5_RECOVERY_ERROR_RET)
+					return MLX5_ERROR_CQE_RET;
 			} else {
 				return 0;
 			}
@@ -1350,8 +1357,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		if (!pkt) {
 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
 			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
-			if (!len) {
+			if (len <= 0) {
 				rte_mbuf_raw_free(rep);
+				if (unlikely(len == MLX5_ERROR_CQE_RET))
+					rq_ci = rxq->rq_ci << sges_n;
 				break;
 			}
 			pkt = seg;
@@ -1630,8 +1639,13 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 		cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
 		ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
-		if (!ret)
+		if (ret == 0)
 			break;
+		if (unlikely(ret == MLX5_ERROR_CQE_RET)) {
+			rq_ci = rxq->rq_ci;
+			consumed_strd = rxq->consumed_strd;
+			break;
+		}
 		byte_cnt = ret;
 		strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
 			   MLX5_MPRQ_STRIDE_NUM_SHIFT;
-- 
2.23.0