// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_mbx.h"
#include "hclgevf_main.h"
#include "hnae3.h"

static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
	/* this function should be called with mbx_resp.mbx_mutex held
	 * to protect the received_resp from race condition
	 */
	hdev->mbx_resp.received_resp  = false;
	hdev->mbx_resp.origin_mbx_msg = 0;
	hdev->mbx_resp.resp_status    = 0;
	memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
}

/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a
 * mailbox message to PF
 * @hdev: pointer to struct hclgevf_dev
 * @code0: the message opcode VF sent to PF
 * @code1: the message subcode VF sent to PF
 * @resp_data: pointer to store the response data
 * @resp_len: the length of resp_data array
 */
static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
				u8 *resp_data, u16 resp_len)
{
#define HCLGEVF_MAX_TRY_TIMES	500
#define HCLGEVF_SLEEP_USECOND	1000
	struct hclgevf_mbx_resp_status *mbx_resp;
	u16 r_code0, r_code1;
	int i = 0;

	if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"VF mbx response len(=%d) exceeds maximum(=%d)\n",
			resp_len, HCLGE_MBX_MAX_RESP_DATA_SIZE);
		return -EINVAL;
	}

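	/* poll for the PF response: each try sleeps roughly 1-2 ms, so with
	 * 500 tries the overall timeout is on the order of a second; bail
	 * out early if the command queue was disabled (e.g. by a reset),
	 * since no response can arrive then
	 */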
	while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
			return -EIO;

		usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
		i++;
	}

	if (i >= HCLGEVF_MAX_TRY_TIMES) {
		dev_err(&hdev->pdev->dev,
			"VF could not get mbx resp(=%d) from PF in %d tries\n",
			hdev->mbx_resp.received_resp, i);
		return -EIO;
	}

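	/* the PF echoes the request back in origin_mbx_msg: opcode in the
	 * upper 16 bits, subcode in the low byte; extract both so this
	 * response can be matched against the request we sent
	 */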
	mbx_resp = &hdev->mbx_resp;
	r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
	r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);

	if (mbx_resp->resp_status)
		return mbx_resp->resp_status;

	if (resp_data)
		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

	hclgevf_reset_mbx_resp_status(hdev);

	if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
		dev_err(&hdev->pdev->dev,
			"VF could not match resp code(code0=%d,code1=%d), %d\n",
			code0, code1, mbx_resp->resp_status);
		return -EIO;
	}

	return 0;
}
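
/* hclgevf_send_mbx_msg: send a mailbox message from VF to PF
 * @hdev: pointer to struct hclgevf_dev
 * @code: the message opcode
 * @subcode: the message subcode
 * @msg_data: the message payload, copied in after code & subcode
 * @msg_len: payload length in bytes
 * @need_resp: when true the send is synchronous and the PF reply is awaited
 *	under mbx_resp.mbx_mutex; when false the message is fire-and-forget
 * @resp_data: buffer for the synchronous response data, may be NULL
 * @resp_len: length of resp_data
 */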
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
			 const u8 *msg_data, u8 msg_len, bool need_resp,
			 u8 *resp_data, u16 resp_len)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

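	/* req aliases the descriptor payload, so the mailbox message is
	 * assembled in place in the command descriptor
	 */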
	/* first two bytes are reserved for code & subcode */
	if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
		dev_err(&hdev->pdev->dev,
			"VF send mbx msg fail, msg len %d exceeds max payload len %d\n",
			msg_len, HCLGE_MBX_MAX_MSG_SIZE - 2);
		return -EINVAL;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = code;
	req->msg[1] = subcode;
	memcpy(&req->msg[2], msg_data, msg_len);

	/* synchronous send */
	if (need_resp) {
		mutex_lock(&hdev->mbx_resp.mbx_mutex);
		hclgevf_reset_mbx_resp_status(hdev);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to send mbx message to PF\n",
				status);
			mutex_unlock(&hdev->mbx_resp.mbx_mutex);
			return status;
		}

		status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
					      resp_len);
		mutex_unlock(&hdev->mbx_resp.mbx_mutex);
	} else {
		/* asynchronous send */
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to send mbx message to PF\n",
				status);
			return status;
		}
	}

	return status;
}
static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
	u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}
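
/* hclgevf_mbx_handler: the interrupt-context fast path; drains the CRQ,
 * latches synchronous PF responses immediately and queues asynchronous
 * events on the ARQ for the mailbox task (slow path) to handle later
 */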
void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
{
	struct hclgevf_mbx_resp_status *resp;
	struct hclge_mbx_pf_to_vf_cmd *req;
	struct hclgevf_cmq_ring *crq;
	struct hclgevf_desc *desc;
	u16 *msg_q;
	u16 flag;
	u8 *temp;
	int i;

	resp = &hdev->mbx_resp;
	crq = &hdev->hw.cmq.crq;

	while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
			dev_info(&hdev->pdev->dev, "vf crq need init\n");
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
			dev_warn(&hdev->pdev->dev,
				 "dropped invalid mailbox message, code = %d\n",
				 req->msg[0]);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hclge_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		/* synchronous messages are time critical and need preferential
		 * treatment. Therefore, we need to acknowledge all the sync
		 * responses as quickly as possible so that waiting tasks do
		 * not time out, and simultaneously queue the async messages
		 * for later processing in context of the mailbox task, i.e.
		 * the slow path.
		 */
		switch (req->msg[0]) {
		case HCLGE_MBX_PF_VF_RESP:
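			/* response layout: msg[1]/msg[2] echo the request
			 * code and subcode, msg[3] carries the status and
			 * msg[4] onwards the response data
			 */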
			if (resp->received_resp)
				dev_warn(&hdev->pdev->dev,
					 "VF mbx resp flag not clear(%d)\n",
					 req->msg[1]);
			resp->received_resp = true;

			resp->origin_mbx_msg = (req->msg[1] << 16);
			resp->origin_mbx_msg |= req->msg[2];
			resp->resp_status = req->msg[3];

			temp = (u8 *)&req->msg[4];
			for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
				resp->additional_info[i] = *temp;
				temp++;
			}
			break;
		case HCLGE_MBX_LINK_STAT_CHANGE:
		case HCLGE_MBX_ASSERTING_RESET:
		case HCLGE_MBX_LINK_STAT_MODE:
			/* set this mbx event as pending. This is required as
			 * we might lose the interrupt event when the mbx task
			 * is busy handling. This shall be cleared when the mbx
			 * task just enters handling state.
			 */
			hdev->mbx_event_pending = true;

			/* we will drop the async msg if we find ARQ as full
			 * and continue with next message
			 */
			if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
				dev_warn(&hdev->pdev->dev,
					 "Async Q full, dropping msg(%d)\n",
					 req->msg[1]);
				break;
			}

			/* tail the async message in arq */
			msg_q = hdev->arq.msg_q[hdev->arq.tail];
			memcpy(&msg_q[0], req->msg,
			       HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
			hclge_mbx_tail_ptr_move_arq(hdev->arq);
			hdev->arq.count++;

			hclgevf_mbx_task_schedule(hdev);

			break;
		default:
			dev_err(&hdev->pdev->dev,
				"VF received unsupported(%d) mbx msg from PF\n",
				req->msg[0]);
			break;
		}
		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back the CMDQ_RQ head pointer; the M7 firmware needs it */
	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
			  crq->next_to_use);
}
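
/* hclgevf_mbx_async_handler: the mailbox-task slow path; drains the ARQ
 * filled by hclgevf_mbx_handler and applies the deferred link and reset
 * events
 */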
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
	enum hnae3_reset_type reset_type;
	u16 link_status;
	u16 *msg_q;
	u8 duplex;
	u32 speed;
	u32 tail;
	u8 idx;

	/* we can safely clear it now as we are at start of the async message
	 * processing
	 */
	hdev->mbx_event_pending = false;

	tail = hdev->arq.tail;
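
	/* note: tail is sampled only once, so any message queued by the IRQ
	 * handler after this point is picked up on the next run of the
	 * mailbox task
	 */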
	/* process all the async queue messages */
	while (tail != hdev->arq.head) {
		if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
			dev_info(&hdev->pdev->dev,
				 "vf crq need init in async\n");
			return;
		}

		msg_q = hdev->arq.msg_q[hdev->arq.head];

		switch (msg_q[0]) {
		case HCLGE_MBX_LINK_STAT_CHANGE:
			link_status = le16_to_cpu(msg_q[1]);
			memcpy(&speed, &msg_q[2], sizeof(speed));
			duplex = (u8)le16_to_cpu(msg_q[4]);
			hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);

			/* update upper layer with new link status */
			hclgevf_update_link_status(hdev, link_status);
			hclgevf_update_speed_duplex(hdev, speed, duplex);

			break;
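		/* msg_q[1] selects the destination of the link-mode word:
		 * non-zero updates mac.supported, zero updates mac.advertising
		 */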
		case HCLGE_MBX_LINK_STAT_MODE:
			idx = (u8)le16_to_cpu(msg_q[1]);
			if (idx)
				memcpy(&hdev->hw.mac.supported, &msg_q[2],
				       sizeof(unsigned long));
			else
				memcpy(&hdev->hw.mac.advertising, &msg_q[2],
				       sizeof(unsigned long));
			break;
		case HCLGE_MBX_ASSERTING_RESET:
			/* PF has asserted reset hence VF should go in pending
			 * state and poll for the hardware reset status till it
			 * has been completely reset. After this, the stack
			 * should eventually be re-initialized.
			 */
			reset_type = le16_to_cpu(msg_q[1]);
			set_bit(reset_type, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
			hclgevf_reset_task_schedule(hdev);

			break;
		default:
			dev_err(&hdev->pdev->dev,
				"fetched unsupported(%d) message from arq\n",
				msg_q[0]);
			break;
		}

		hclge_mbx_head_ptr_move_arq(hdev->arq);
		hdev->arq.count--;
		msg_q = hdev->arq.msg_q[hdev->arq.head];
	}
}