asedeno.scripts.mit.edu Git - linux.git/commitdiff
net: hns3: stop handling command queue while resetting VF
authorHuazhong Tan <tanhuazhong@huawei.com>
Fri, 9 Nov 2018 14:07:50 +0000 (22:07 +0800)
committerDavid S. Miller <davem@davemloft.net>
Sat, 10 Nov 2018 00:47:35 +0000 (16:47 -0800)
According to hardware's description, after the reset occurs, the driver
needs to re-initialize the command queue before sending and receiving
any commands. Therefore, the VF driver needs to mark the command
queue as requiring re-initialization with HCLGEVF_STATE_CMD_DISABLE,
and must not allow commands to be sent or received before the
re-initialization is done.

Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c

index b917acf71ee6de45d7972c21c3052d87f1869a64..d5765c8cf3a3084dbae68fedfc1050125cd49083 100644 (file)
@@ -189,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 
        spin_lock_bh(&hw->cmq.csq.lock);
 
-       if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+       if (num > hclgevf_ring_space(&hw->cmq.csq) ||
+           test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
                spin_unlock_bh(&hw->cmq.csq.lock);
                return -EBUSY;
        }
@@ -338,6 +339,16 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
        spin_unlock_bh(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);
 
+       clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+       /* Check if there is new reset pending, because the higher level
+        * reset may happen when lower level reset is being processed.
+        */
+       if (hclgevf_is_reset_pending(hdev)) {
+               set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+               return -EBUSY;
+       }
+
        /* get firmware version */
        ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
        if (ret) {
index fe97cc7f621b96121c7c102bdc12215b1e1ba44a..5da9053ebf4bd0f50b03a493b65d482f711ea94a 100644 (file)
@@ -1162,6 +1162,8 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
                break;
        }
 
+       set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
        dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
                 hdev->reset_type, ret);
 
@@ -1467,6 +1469,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
                         "receive reset interrupt 0x%x!\n", rst_ing_reg);
                set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
                set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+               set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
                cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
                *clearval = cmdq_src_reg;
                return HCLGEVF_VECTOR0_EVENT_RST;
index e37897df6afc1ee45022d7e585ae09735869f445..ffe3d495376cb0aeb2e839e116fdc2b3987864ca 100644 (file)
@@ -79,6 +79,7 @@ enum hclgevf_states {
        HCLGEVF_STATE_RST_HANDLING,
        HCLGEVF_STATE_MBX_SERVICE_SCHED,
        HCLGEVF_STATE_MBX_HANDLING,
+       HCLGEVF_STATE_CMD_DISABLE,
 };
 
 #define HCLGEVF_MPF_ENBALE 1
@@ -212,6 +213,11 @@ struct hclgevf_dev {
        u32 flag;
 };
 
+static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
+{
+       return !!hdev->reset_pending;
+}
+
 int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
                         const u8 *msg_data, u8 msg_len, bool need_resp,
                         u8 *resp_data, u16 resp_len);
index 01a028aa71f7903cd51f8c50ceb38c8b5c65ecdd..ef9c8e6eca28f2e26aa42f864c707c6a1befdb9b 100644 (file)
@@ -40,6 +40,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
        }
 
        while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+               if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+                       return -EIO;
+
                udelay(HCLGEVF_SLEEP_USCOEND);
                i++;
        }
@@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
        crq = &hdev->hw.cmq.crq;
 
        while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
+               if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+                       dev_info(&hdev->pdev->dev, "vf crq need init\n");
+                       return;
+               }
+
                desc = &crq->desc[crq->next_to_use];
                req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
 
@@ -249,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 
        /* process all the async queue messages */
        while (tail != hdev->arq.head) {
+               if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+                       dev_info(&hdev->pdev->dev,
+                                "vf crq need init in async\n");
+                       return;
+               }
+
                msg_q = hdev->arq.msg_q[hdev->arq.head];
 
                switch (msg_q[0]) {