net: hns3: do not schedule the periodic task when reset fail
author	Guojia Liao <liaoguojia@huawei.com>
	Sat, 14 Dec 2019 02:06:41 +0000 (10:06 +0800)
committer	David S. Miller <davem@davemloft.net>
	Tue, 17 Dec 2019 00:12:25 +0000 (16:12 -0800)
service_task is scheduled once per second to handle some periodic
jobs. When a reset fails, the device is no longer available, so
these periodic jobs do not need to run.

This patch adds the HCLGE_STATE_RST_FAIL/HCLGEVF_STATE_RST_FAIL
flags to indicate that a reset has failed, and checks them before
scheduling the periodic task.
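
With this change, the new state bit is set when the reset error handler
gives up, cleared again once a later reset completes (and at probe-time
state init), and tested before the service task is requeued. As a rough
sketch, the resulting PF-side scheduler after this patch looks like the
following (comments added here for illustration only; the VF side mirrors
it with the HCLGEVF_ names):

void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
	/* HCLGE_STATE_RST_FAIL is set in hclge_reset_err_handle() when a
	 * reset ultimately fails, and cleared in hclge_reset() after a
	 * later reset succeeds (and in hclge_state_init() at probe).
	 * While it is set, the delayed service work is simply not
	 * requeued, so no periodic jobs run on an unavailable device.
	 */
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
				    hclge_wq, &hdev->service_task,
				    delay_time);
}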

Signed-off-by: Guojia Liao <liaoguojia@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 5129b4a55f9bcf6958b7a1441d19207c4a857308..4e7a07877e5d75f7578ebbf26f8a9f6855d62ae0 100644
@@ -2683,7 +2683,8 @@ static void hclge_reset_task_schedule(struct hclge_dev *hdev)
 
 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
 {
-       if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state))
+       if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+           !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
                mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
                                    hclge_wq, &hdev->service_task,
                                    delay_time);
@@ -3690,6 +3691,8 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
 
        hclge_dbg_dump_rst_info(hdev);
 
+       set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+
        return false;
 }
 
@@ -3843,6 +3846,7 @@ static void hclge_reset(struct hclge_dev *hdev)
        hdev->rst_stats.reset_fail_cnt = 0;
        hdev->rst_stats.reset_done_cnt++;
        ae_dev->reset_type = HNAE3_NONE_RESET;
+       clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
 
        /* if default_reset_request has a higher level reset request,
         * it should be handled as soon as possible. since some errors
@@ -9303,6 +9307,7 @@ static void hclge_state_init(struct hclge_dev *hdev)
        set_bit(HCLGE_STATE_DOWN, &hdev->state);
        clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
        clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+       clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
        clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
        clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
 }
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ad40cf6be4f1d040d5e7db601c0ae775829bc989..3a913976b820d6cde918dd84bb21f5467469aecc 100644
@@ -215,6 +215,7 @@ enum HCLGE_DEV_STATE {
        HCLGE_STATE_STATISTICS_UPDATING,
        HCLGE_STATE_CMD_DISABLE,
        HCLGE_STATE_LINK_UPDATING,
+       HCLGE_STATE_RST_FAIL,
        HCLGE_STATE_MAX
 };
 
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index b56c19afc3b9afc083b4451c65226a56f6331de4..c33b8027f80120945cd154450f7bef6e1ee3d8f9 100644
@@ -1598,6 +1598,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
                set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                hclgevf_reset_task_schedule(hdev);
        } else {
+               set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
                hclgevf_dump_rst_info(hdev);
        }
 }
@@ -1659,6 +1660,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
        ae_dev->reset_type = HNAE3_NONE_RESET;
        hdev->rst_stats.rst_done_cnt++;
        hdev->rst_stats.rst_fail_cnt = 0;
+       clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
 
        return ret;
 err_reset_lock:
@@ -1791,7 +1793,8 @@ void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
 static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
                                  unsigned long delay)
 {
-       if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state))
+       if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+           !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
                mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
 }
 
@@ -2283,6 +2286,7 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
 {
        clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+       clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
 
        INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
 
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 450e5878293f465f1cc41c4ae8989b63fbaf0acd..003114f6db6cfa9b666196bf31c149bcbc5b07e0 100644
@@ -148,6 +148,7 @@ enum hclgevf_states {
        HCLGEVF_STATE_MBX_HANDLING,
        HCLGEVF_STATE_CMD_DISABLE,
        HCLGEVF_STATE_LINK_UPDATING,
+       HCLGEVF_STATE_RST_FAIL,
 };
 
 struct hclgevf_mac {