177 lines
5.6 KiB
Diff
From f13115e21001b7e99a243c5ac949f0a4c378862f Mon Sep 17 00:00:00 2001
|
|
From: Peng Li <lipeng321@huawei.com>
|
|
Date: Tue, 28 Sep 2021 11:51:59 +0800
|
|
Subject: [PATCH 108/283] net: hns3: package new functions to simplify
|
|
hclgevf_mbx_handler code
|
|
|
|
mainline inclusion
|
|
from mainline-v5.15-rc1
|
|
commit d7517f8f6b3b12c883ca0975659450ae009b1524
|
|
category: feature
|
|
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I8EMYT
|
|
CVE: NA
|
|
|
|
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=d7517f8f6b3b12c883ca0975659450ae009b1524
|
|
|
|
----------------------------------------------------------------------
|
|
|
|
This patch packages two new functions to simplify the function
|
|
hclgevf_mbx_handler, and it can reduce the cyclomatic complexity
|
|
and make code more concise.
|
|
|
|
Signed-off-by: Peng Li <lipeng321@huawei.com>
|
|
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
|
|
Signed-off-by: David S. Miller <davem@davemloft.net>
|
|
Reviewed-by: Yongxin Li <liyongxin1@huawei.com>
|
|
Signed-off-by: Junxin Chen <chenjunxin1@huawei.com>
|
|
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
|
|
Signed-off-by: Xiaodong Li <lixiaodong67@huawei.com>
|
|
|
|
Conflicts:
|
|
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
|
|
---
|
|
.../hisilicon/hns3/hns3vf/hclgevf_mbx.c | 107 +++++++++---------
|
|
1 file changed, 55 insertions(+), 52 deletions(-)
|
|
|
|
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
|
|
index 995321058a70..510d9826e998 100644
|
|
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
|
|
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
|
|
@@ -181,15 +181,66 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
|
|
return tail == hw->cmq.crq.next_to_use;
|
|
}
|
|
|
|
+static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
|
|
+ struct hclge_mbx_pf_to_vf_cmd *req)
|
|
+{
|
|
+ struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp;
|
|
+
|
|
+ if (resp->received_resp)
|
|
+ dev_warn(&hdev->pdev->dev,
|
|
+ "VF mbx resp flag not clear(%u)\n",
|
|
+ req->msg.vf_mbx_msg_code);
|
|
+
|
|
+ resp->origin_mbx_msg =
|
|
+ (req->msg.vf_mbx_msg_code << 16);
|
|
+ resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
|
|
+ resp->resp_status =
|
|
+ hclgevf_resp_to_errno(req->msg.resp_status);
|
|
+ memcpy(resp->additional_info, req->msg.resp_data,
|
|
+ HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
|
|
+ if (req->match_id) {
|
|
+ /* If match_id is not zero, it means PF support match_id.
|
|
+ * if the match_id is right, VF get the right response, or
|
|
+ * ignore the response. and driver will clear hdev->mbx_resp
|
|
+ * when send next message which need response.
|
|
+ */
|
|
+ if (req->match_id == resp->match_id)
|
|
+ resp->received_resp = true;
|
|
+ } else {
|
|
+ resp->received_resp = true;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
|
|
+ struct hclge_mbx_pf_to_vf_cmd *req)
|
|
+{
|
|
+ /* we will drop the async msg if we find ARQ as full
|
|
+ * and continue with next message
|
|
+ */
|
|
+ if (atomic_read(&hdev->arq.count) >=
|
|
+ HCLGE_MBX_MAX_ARQ_MSG_NUM) {
|
|
+ dev_warn(&hdev->pdev->dev,
|
|
+ "Async Q full, dropping msg(%u)\n",
|
|
+ req->msg.code);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* tail the async message in arq */
|
|
+ memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg,
|
|
+ HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
|
|
+ hclge_mbx_tail_ptr_move_arq(hdev->arq);
|
|
+ atomic_inc(&hdev->arq.count);
|
|
+
|
|
+ hclgevf_mbx_task_schedule(hdev);
|
|
+}
|
|
+
|
|
void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
|
|
{
|
|
- struct hclgevf_mbx_resp_status *resp;
|
|
struct hclge_mbx_pf_to_vf_cmd *req;
|
|
struct hclgevf_cmq_ring *crq;
|
|
struct hclgevf_desc *desc;
|
|
u16 flag;
|
|
|
|
- resp = &hdev->mbx_resp;
|
|
crq = &hdev->hw.cmq.crq;
|
|
|
|
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
|
|
@@ -223,62 +274,14 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
|
|
*/
|
|
switch (req->msg.code) {
|
|
case HCLGE_MBX_PF_VF_RESP:
|
|
- if (resp->received_resp)
|
|
- dev_warn(&hdev->pdev->dev,
|
|
- "VF mbx resp flag not clear(%u)\n",
|
|
- req->msg.vf_mbx_msg_code);
|
|
-
|
|
- resp->origin_mbx_msg =
|
|
- (req->msg.vf_mbx_msg_code << 16);
|
|
- resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
|
|
- resp->resp_status =
|
|
- hclgevf_resp_to_errno(req->msg.resp_status);
|
|
-
|
|
- memcpy(resp->additional_info, req->msg.resp_data,
|
|
- HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
|
|
-
|
|
- /* ensure additional_info will be seen before setting
|
|
- * received_resp
|
|
- */
|
|
- smp_wmb();
|
|
-
|
|
- if (req->match_id) {
|
|
- /* If match_id is not zero, it means PF support
|
|
- * match_id. If the match_id is right, VF get
|
|
- * the right response, orignore the response,
|
|
- * and driver will clear hdev->mbx_resp when
|
|
- * send next message which need response.
|
|
- */
|
|
- if (req->match_id == resp->match_id)
|
|
- resp->received_resp = true;
|
|
- } else {
|
|
- resp->received_resp = true;
|
|
- }
|
|
+ hclgevf_handle_mbx_response(hdev, req);
|
|
break;
|
|
case HCLGE_MBX_LINK_STAT_CHANGE:
|
|
case HCLGE_MBX_ASSERTING_RESET:
|
|
case HCLGE_MBX_LINK_STAT_MODE:
|
|
case HCLGE_MBX_PUSH_VLAN_INFO:
|
|
case HCLGE_MBX_PUSH_PROMISC_INFO:
|
|
- /* we will drop the async msg if we find ARQ as full
|
|
- * and continue with next message
|
|
- */
|
|
- if (atomic_read(&hdev->arq.count) >=
|
|
- HCLGE_MBX_MAX_ARQ_MSG_NUM) {
|
|
- dev_warn(&hdev->pdev->dev,
|
|
- "Async Q full, dropping msg(%u)\n",
|
|
- req->msg.code);
|
|
- break;
|
|
- }
|
|
-
|
|
- /* tail the async message in arq */
|
|
- memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg,
|
|
- HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
|
|
- hclge_mbx_tail_ptr_move_arq(hdev->arq);
|
|
- atomic_inc(&hdev->arq.count);
|
|
-
|
|
- hclgevf_mbx_task_schedule(hdev);
|
|
-
|
|
+ hclgevf_handle_mbx_msg(hdev, req);
|
|
break;
|
|
default:
|
|
dev_err(&hdev->pdev->dev,
|
|
--
|
|
2.34.1
|
|
|