1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
4 #include <linux/device.h>
6 #include "hclge_debugfs.h"
7 #include "hclge_main.h"
/* Dispatch table for the debugfs "dump reg" command: maps a register
 * group name typed by the user to its DFX message descriptor array,
 * the offset used to look up its BD count, and the firmware query
 * opcode for reading the registers.
 * NOTE(review): this excerpt appears to be missing the
 * "{ .reg_type = ..." opener lines for several entries (ssu/rpu/ncsi/
 * rtc/ppp/rcb/tqp groups) and the closing "};" — verify against the
 * complete source file.
 */
11 static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
12 { .reg_type = "bios common",
13 .dfx_msg = &hclge_dbg_bios_common_reg[0],
14 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
15 .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
16 .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
18 .dfx_msg = &hclge_dbg_ssu_reg_0[0],
19 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
20 .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
21 .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
23 .dfx_msg = &hclge_dbg_ssu_reg_1[0],
24 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
25 .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
26 .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
28 .dfx_msg = &hclge_dbg_ssu_reg_2[0],
29 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
30 .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
31 .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
32 { .reg_type = "igu egu",
33 .dfx_msg = &hclge_dbg_igu_egu_reg[0],
34 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
35 .offset = HCLGE_DBG_DFX_IGU_OFFSET,
36 .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
38 .dfx_msg = &hclge_dbg_rpu_reg_0[0],
39 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
40 .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
41 .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
43 .dfx_msg = &hclge_dbg_rpu_reg_1[0],
44 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
45 .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
46 .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
48 .dfx_msg = &hclge_dbg_ncsi_reg[0],
49 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
50 .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
51 .cmd = HCLGE_OPC_DFX_NCSI_REG } },
53 .dfx_msg = &hclge_dbg_rtc_reg[0],
54 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
55 .offset = HCLGE_DBG_DFX_RTC_OFFSET,
56 .cmd = HCLGE_OPC_DFX_RTC_REG } },
58 .dfx_msg = &hclge_dbg_ppp_reg[0],
59 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
60 .offset = HCLGE_DBG_DFX_PPP_OFFSET,
61 .cmd = HCLGE_OPC_DFX_PPP_REG } },
63 .dfx_msg = &hclge_dbg_rcb_reg[0],
64 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
65 .offset = HCLGE_DBG_DFX_RCB_OFFSET,
66 .cmd = HCLGE_OPC_DFX_RCB_REG } },
68 .dfx_msg = &hclge_dbg_tqp_reg[0],
69 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
70 .offset = HCLGE_DBG_DFX_TQP_OFFSET,
71 .cmd = HCLGE_OPC_DFX_TQP_REG } },
/* hclge_dbg_get_dfx_bd_num - query firmware for the buffer-descriptor
 * count of one DFX register group.
 * @hdev: device instance
 * @offset: index of the register group within the BD-number reply
 *
 * Sends the BD-number query (hclge_query_bd_num_cmd_send) and picks the
 * 32-bit count at @offset out of the returned descriptor array; the
 * result is returned as int.
 * NOTE(review): local declarations, braces and the error-path return
 * are not visible in this excerpt — verify against the full source.
 */
74 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
76 #define HCLGE_GET_DFX_REG_TYPE_CNT 4
78 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
83 ret = hclge_query_bd_num_cmd_send(hdev, desc);
85 dev_err(&hdev->pdev->dev,
86 "get dfx bdnum fail, ret = %d\n", ret);
/* Each descriptor holds ARRAY_SIZE(desc[0].data) entries; map the flat
 * offset to (descriptor, entry-within-descriptor).
 */
90 entries_per_desc = ARRAY_SIZE(desc[0].data);
91 index = offset % entries_per_desc;
92 return (int)desc[offset / entries_per_desc].data[index];
/* hclge_dbg_cmd_send - build and send a (possibly multi-BD) debug query.
 * @hdev: device instance
 * @desc_src: caller-provided descriptor array, at least @bd_num long
 * @index: value written into data[0] of the first descriptor (query key)
 * @bd_num: number of chained descriptors to send
 * @cmd: firmware opcode
 *
 * Sets up the first descriptor as a read of @cmd with @index as payload,
 * chains the remaining @bd_num - 1 descriptors via HCLGE_CMD_FLAG_NEXT,
 * then issues the command; failures are logged with the opcode.
 */
95 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
96 struct hclge_desc *desc_src,
97 int index, int bd_num,
98 enum hclge_opcode_type cmd)
100 struct hclge_desc *desc = desc_src;
103 hclge_cmd_setup_basic_desc(desc, cmd, true);
104 desc->data[0] = cpu_to_le32(index);
/* Mark the previous BD as "has next" before initializing the next one. */
106 for (i = 1; i < bd_num; i++) {
107 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
109 hclge_cmd_setup_basic_desc(desc, cmd, true);
112 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
114 dev_err(&hdev->pdev->dev,
115 "cmd(0x%x) send fail, ret = %d\n", cmd, ret);
/* hclge_dbg_dump_reg_common - dump one DFX register group to the log.
 * @hdev: device instance
 * @reg_info: table entry describing the group (names, BD offset, opcode)
 *
 * Parses an optional index from the tail of the user command string,
 * queries the group's BD count, allocates descriptors, reads the
 * registers via hclge_dbg_cmd_send() and prints each named register
 * value with dev_info().
 * FIX: "®_info->reg_msg" was a mis-encoded "&reg_info->reg_msg"
 * ("&reg" mangled into the registered-sign character); restored the
 * address-of operator. All other code is unchanged.
 * NOTE(review): several interior lines (error returns, kfree of
 * desc_src, loop advance of desc) are not visible in this excerpt —
 * verify against the full source.
 */
119 static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
120 struct hclge_dbg_reg_type_info *reg_info,
/* Skip past the group name (plus separator) to reach the index token. */
125 const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
126 struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
127 struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
128 struct hclge_desc *desc_src;
129 struct hclge_desc *desc;
130 int entries_per_desc;
/* A missing/invalid index defaults to 0. */
137 ret = kstrtouint(s, 0, &index);
138 index = (ret != 0) ? 0 : index;
141 bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
143 dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
144 reg_msg->offset, bd_num);
148 buf_len = sizeof(struct hclge_desc) * bd_num;
149 desc_src = kzalloc(buf_len, GFP_KERNEL);
151 dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
156 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
/* Print at most msg_num values — the table may name fewer registers
 * than the descriptors can carry.
 */
162 entries_per_desc = ARRAY_SIZE(desc->data);
163 min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);
166 for (i = 0; i < min_num; i++) {
167 if (i > 0 && (i % entries_per_desc) == 0)
169 if (dfx_message->flag)
170 dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
171 dfx_message->message,
172 le32_to_cpu(desc->data[i % entries_per_desc]));
/* hclge_dbg_dump_dcb - dump DCB (QoS scheduling) status registers.
 * @hdev: device instance
 * @cmd_buf: user string carrying six ids: port pri pg rq nq qset
 *
 * Parses the six ids with sscanf, then issues a series of DFX status
 * queries (qset, priority, PG, port, NQ/RQ counters and TM internal
 * state) and logs each decoded field with dev_info().
 * NOTE(review): the error-return checks after each hclge_dbg_cmd_send()
 * call are not visible in this excerpt — verify against the full source.
 */
180 static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
182 struct device *dev = &hdev->pdev->dev;
183 struct hclge_dbg_bitmap_cmd *bitmap;
184 int rq_id, pri_id, qset_id;
185 int port_id, nq_id, pg_id;
186 struct hclge_desc desc[2];
190 cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
191 &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
193 dev_err(&hdev->pdev->dev,
194 "dump dcb: bad command parameter, cnt=%d\n", cnt);
/* Qset status: the bitmap lives in data[1] of the first descriptor. */
198 ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1,
199 HCLGE_OPC_QSET_DFX_STS);
203 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
204 dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
205 dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
206 dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
207 dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);
/* Priority status. */
209 ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS);
213 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
214 dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
215 dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
216 dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);
/* Priority-group status. */
218 ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS);
222 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
223 dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
224 dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
225 dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);
/* Port status. */
227 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
228 HCLGE_OPC_PORT_DFX_STS);
232 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
233 dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
234 dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);
/* Scheduler NQ/RQ packet counters. */
236 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT);
240 dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]))
242 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
246 dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
/* TM internal status spans two descriptors (bd_num = 2). */
248 ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
252 dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
253 dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
254 dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
255 le32_to_cpu(desc[0].data[3]));
256 dev_info(dev, "tx_private_waterline: 0x%x\n",
257 le32_to_cpu(desc[0].data[4]));
258 dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
259 dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
260 dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
262 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
263 HCLGE_OPC_TM_INTERNAL_CNT);
267 dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
268 dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
270 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
271 HCLGE_OPC_TM_INTERNAL_STS_1);
275 dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
276 dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
277 dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
278 dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
279 le32_to_cpu(desc[0].data[4]));
280 dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
281 le32_to_cpu(desc[0].data[5]));
/* hclge_dbg_dump_reg_cmd - dispatch a "dump reg" debugfs command.
 * @hdev: device instance
 * @cmd_buf: user command string
 *
 * Matches the command prefix against each entry of hclge_dbg_reg_info[]
 * (and the special "dcb" keyword) and forwards to the matching dumper;
 * logs "unknown command" when nothing matched (via has_dump, whose
 * set/test sites are not visible in this excerpt).
 */
284 static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
286 struct hclge_dbg_reg_type_info *reg_info;
287 bool has_dump = false;
290 for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
291 reg_info = &hclge_dbg_reg_info[i];
292 if (!strncmp(cmd_buf, reg_info->reg_type,
293 strlen(reg_info->reg_type))) {
294 hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);
/* sizeof("dcb") == 4, so the argument pointer skips "dcb" + separator. */
299 if (strncmp(cmd_buf, "dcb", 3) == 0) {
300 hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
305 dev_info(&hdev->pdev->dev, "unknown command\n");
/* hclge_title_idx_print - print "title(index): value" choosing between
 * two value strings based on @flag (true_buf vs. the false-case buffer,
 * whose parameter name is not visible in this excerpt).
 */
310 static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
311 char *title_buf, char *true_buf,
315 dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
318 dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
/* hclge_dbg_dump_tc - dump per-TC ETS weight configuration.
 * @hdev: device instance
 *
 * Only meaningful on DCB-capable devices; reads HCLGE_OPC_ETS_TC_WEIGHT
 * and prints the weight offset plus, for each TC, whether it is in
 * strict-priority mode (tc_weight[i] used as a boolean flag here).
 */
322 static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
324 struct hclge_ets_tc_weight_cmd *ets_weight;
325 struct hclge_desc desc;
328 if (!hnae3_dev_dcb_supported(hdev)) {
329 dev_info(&hdev->pdev->dev,
330 "Only DCB-supported dev supports tc\n");
334 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
336 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
338 dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);
342 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
344 dev_info(&hdev->pdev->dev, "dump tc\n");
345 dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
346 ets_weight->weight_offset);
348 for (i = 0; i < HNAE3_MAX_TC; i++)
349 hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
350 "tc", "no sp mode", "sp mode");
/* hclge_dbg_dump_tm_pg - dump priority-group TM shaping/scheduling cfg.
 * @hdev: device instance
 *
 * Reads and prints, in order: PG committed (C) and peak (P) shaping,
 * port shaping, PG/PRI/QS scheduler mode registers and, on DCB-capable
 * devices, the BP-to-qset mapping. All failures funnel to the shared
 * err_tm_pg_cmd_send label, which logs the failing opcode.
 */
353 static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
355 struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
356 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
357 struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
358 enum hclge_opcode_type cmd;
359 struct hclge_desc desc;
/* PG committed-rate shaping. */
362 cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
363 hclge_cmd_setup_basic_desc(&desc, cmd, true);
364 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
366 goto err_tm_pg_cmd_send;
368 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
369 dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
370 dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
371 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
/* PG peak-rate shaping. */
373 cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
374 hclge_cmd_setup_basic_desc(&desc, cmd, true);
375 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
377 goto err_tm_pg_cmd_send;
379 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
380 dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
381 dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
382 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
/* Port-level shaping. */
384 cmd = HCLGE_OPC_TM_PORT_SHAPPING;
385 hclge_cmd_setup_basic_desc(&desc, cmd, true);
386 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
388 goto err_tm_pg_cmd_send;
390 port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
391 dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
392 le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
/* Scheduler-mode registers: value printed raw from data[0]. */
394 cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
395 hclge_cmd_setup_basic_desc(&desc, cmd, true);
396 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
398 goto err_tm_pg_cmd_send;
400 dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
401 le32_to_cpu(desc.data[0]));
403 cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
404 hclge_cmd_setup_basic_desc(&desc, cmd, true);
405 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
407 goto err_tm_pg_cmd_send;
409 dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
410 le32_to_cpu(desc.data[0]));
412 cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
413 hclge_cmd_setup_basic_desc(&desc, cmd, true);
414 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
416 goto err_tm_pg_cmd_send;
418 dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
419 le32_to_cpu(desc.data[0]));
/* BP-to-qset mapping only exists on DCB-capable hardware. */
421 if (!hnae3_dev_dcb_supported(hdev)) {
422 dev_info(&hdev->pdev->dev,
423 "Only DCB-supported dev supports tm mapping\n");
427 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
428 hclge_cmd_setup_basic_desc(&desc, cmd, true);
429 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
431 goto err_tm_pg_cmd_send;
433 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
434 dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
435 bp_to_qs_map_cmd->tc_id);
436 dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
437 bp_to_qs_map_cmd->qs_group_id);
438 dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
439 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
/* Shared error path: report which opcode failed. */
443 dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
/* hclge_dbg_dump_tm - dump traffic-manager link/weight/shaping config.
 * @hdev: device instance
 *
 * Queries and prints, in order: PG-to-PRI link, QS-to-PRI link,
 * NQ-to-QS link, PG/QS/PRI DWRR weights and PRI C/P shaping, then
 * chains into hclge_dbg_dump_tm_pg(). All failures funnel to the
 * shared err_tm_cmd_send label, which logs the failing opcode.
 */
447 static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
449 struct hclge_priority_weight_cmd *priority_weight;
450 struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
451 struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
452 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
453 struct hclge_pri_shapping_cmd *shap_cfg_cmd;
454 struct hclge_pg_weight_cmd *pg_weight;
455 struct hclge_qs_weight_cmd *qs_weight;
456 enum hclge_opcode_type cmd;
457 struct hclge_desc desc;
460 cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
461 hclge_cmd_setup_basic_desc(&desc, cmd, true);
462 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
464 goto err_tm_cmd_send;
466 pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
467 dev_info(&hdev->pdev->dev, "dump tm\n");
468 dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n",
469 pg_to_pri_map->pg_id);
470 dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
471 pg_to_pri_map->pri_bit_map);
473 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
474 hclge_cmd_setup_basic_desc(&desc, cmd, true);
475 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
477 goto err_tm_cmd_send;
479 qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
480 dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
481 le16_to_cpu(qs_to_pri_map->qs_id));
482 dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
483 qs_to_pri_map->priority);
484 dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
485 qs_to_pri_map->link_vld);
487 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
488 hclge_cmd_setup_basic_desc(&desc, cmd, true);
489 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
491 goto err_tm_cmd_send;
493 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
494 dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
495 le16_to_cpu(nq_to_qs_map->nq_id));
496 dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
497 le16_to_cpu(nq_to_qs_map->qset_id));
/* DWRR weights per PG / QS / priority. */
499 cmd = HCLGE_OPC_TM_PG_WEIGHT;
500 hclge_cmd_setup_basic_desc(&desc, cmd, true);
501 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
503 goto err_tm_cmd_send;
505 pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
506 dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
507 dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);
509 cmd = HCLGE_OPC_TM_QS_WEIGHT;
510 hclge_cmd_setup_basic_desc(&desc, cmd, true);
511 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
513 goto err_tm_cmd_send;
515 qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
516 dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
517 le16_to_cpu(qs_weight->qs_id));
518 dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
520 cmd = HCLGE_OPC_TM_PRI_WEIGHT;
521 hclge_cmd_setup_basic_desc(&desc, cmd, true);
522 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
524 goto err_tm_cmd_send;
526 priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
527 dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
528 dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);
/* Priority committed (C) and peak (P) shaping parameters. */
530 cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
531 hclge_cmd_setup_basic_desc(&desc, cmd, true);
532 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
534 goto err_tm_cmd_send;
536 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
537 dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
538 dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
539 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
541 cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
542 hclge_cmd_setup_basic_desc(&desc, cmd, true);
543 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
545 goto err_tm_cmd_send;
547 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
548 dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
549 dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
550 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
552 hclge_dbg_dump_tm_pg(hdev);
/* Shared error path: report which opcode failed. */
557 dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
/* hclge_dbg_dump_tm_map - trace one queue through the TM hierarchy.
 * @hdev: device instance
 *
 * Parses a queue id from the user buffer, then follows the chain
 * queue -> qset -> priority -> TC via three firmware queries and prints
 * the resulting ids; on DCB-capable devices it additionally reads all
 * 32 BP-to-qset mapping groups into qset_maping[] and prints them as
 * eight-word rows. ("maping" spelling matches the existing identifier.)
 * NOTE(review): the declarations of qset_maping[], i, qset_id, pri_id
 * and tc_id are not visible in this excerpt, and the row loop prints
 * qset_maping[i+7..i] — the per-row update of i is also not visible;
 * verify against the full source.
 */
561 static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
564 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
565 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
566 struct hclge_qs_to_pri_link_cmd *map;
567 struct hclge_tqp_tx_queue_tc_cmd *tc;
568 enum hclge_opcode_type cmd;
569 struct hclge_desc desc;
570 int queue_id, group_id;
/* A missing/invalid queue id defaults to 0. */
576 ret = kstrtouint(cmd_buf, 0, &queue_id);
577 queue_id = (ret != 0) ? 0 : queue_id;
/* Step 1: queue -> qset (low 10 bits of qset_id). */
579 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
580 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
581 hclge_cmd_setup_basic_desc(&desc, cmd, true);
582 nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
583 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
585 goto err_tm_map_cmd_send;
586 qset_id = nq_to_qs_map->qset_id & 0x3FF;
/* Step 2: qset -> priority. */
588 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
589 map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
590 hclge_cmd_setup_basic_desc(&desc, cmd, true);
591 map->qs_id = cpu_to_le16(qset_id);
592 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
594 goto err_tm_map_cmd_send;
595 pri_id = map->priority;
/* Step 3: queue -> TC (low 3 bits). */
597 cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
598 tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
599 hclge_cmd_setup_basic_desc(&desc, cmd, true);
600 tc->queue_id = cpu_to_le16(queue_id);
601 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
603 goto err_tm_map_cmd_send;
604 tc_id = tc->tc_id & 0x7;
606 dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
607 dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
608 queue_id, qset_id, pri_id, tc_id);
610 if (!hnae3_dev_dcb_supported(hdev)) {
611 dev_info(&hdev->pdev->dev,
612 "Only DCB-supported dev supports tm mapping\n");
/* Collect the qset bitmap of every BP group for this TC. */
616 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
617 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
618 for (group_id = 0; group_id < 32; group_id++) {
619 hclge_cmd_setup_basic_desc(&desc, cmd, true);
620 bp_to_qs_map_cmd->tc_id = tc_id;
621 bp_to_qs_map_cmd->qs_group_id = group_id;
622 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
624 goto err_tm_map_cmd_send;
626 qset_maping[group_id] = bp_to_qs_map_cmd->qs_bit_map;
629 dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n");
632 for (group_id = 0; group_id < 4; group_id++) {
633 dev_info(&hdev->pdev->dev,
634 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
635 group_id * 256, qset_maping[(u32)(i + 7)],
636 qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)],
637 qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)],
638 qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)],
/* Shared error path: report which opcode failed. */
646 dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
/* hclge_dbg_dump_qos_pause_cfg - dump MAC pause-frame parameters.
 * @hdev: device instance
 *
 * Reads HCLGE_OPC_CFG_MAC_PARA and prints the pause transmit gap and
 * transmit time.
 * NOTE(review): the error message says "dump checksum" — likely a
 * copy/paste leftover; consider changing to "dump qos pause cfg".
 */
650 static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
652 struct hclge_cfg_pause_param_cmd *pause_param;
653 struct hclge_desc desc;
656 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
658 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
660 dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n",
665 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
666 dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
667 dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
668 pause_param->pause_trans_gap);
669 dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
670 le16_to_cpu(pause_param->pause_trans_time));
/* hclge_dbg_dump_qos_pri_map - dump the priority-to-TC mapping table.
 * @hdev: device instance
 *
 * Reads HCLGE_OPC_PRI_TO_TC_MAPPING and prints the VLAN-to-priority
 * setting plus the TC assigned to each of the eight priorities.
 */
673 static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
675 struct hclge_qos_pri_map_cmd *pri_map;
676 struct hclge_desc desc;
679 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
681 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
683 dev_err(&hdev->pdev->dev,
684 "dump qos pri map fail, ret = %d\n", ret);
688 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
689 dev_info(&hdev->pdev->dev, "dump qos pri map\n");
690 dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
691 dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
692 dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
693 dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
694 dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
695 dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
696 dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
697 dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
698 dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
/* hclge_dbg_dump_qos_buf_cfg - dump TX/RX packet-buffer allocation.
 * @hdev: device instance
 *
 * Prints per-TC TX buffer sizes, per-TC RX private buffers plus shared
 * buffer, RX common waterline and global packet counters; on
 * DCB-capable devices it additionally dumps the per-TC RX private
 * waterlines and common thresholds, each of which spans two chained
 * descriptors. Failures funnel to err_qos_cmd_send, which logs the
 * failing opcode.
 */
701 static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
703 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
704 struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
705 struct hclge_rx_priv_wl_buf *rx_priv_wl;
706 struct hclge_rx_com_wl *rx_packet_cnt;
707 struct hclge_rx_com_thrd *rx_com_thrd;
708 struct hclge_rx_com_wl *rx_com_wl;
709 enum hclge_opcode_type cmd;
710 struct hclge_desc desc[2];
/* Per-TC TX packet buffer allocation. */
713 cmd = HCLGE_OPC_TX_BUFF_ALLOC;
714 hclge_cmd_setup_basic_desc(desc, cmd, true);
715 ret = hclge_cmd_send(&hdev->hw, desc, 1);
717 goto err_qos_cmd_send;
719 dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
721 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
722 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
723 dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
724 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
/* Per-TC RX private buffers plus shared buffer. */
726 cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
727 hclge_cmd_setup_basic_desc(desc, cmd, true);
728 ret = hclge_cmd_send(&hdev->hw, desc, 1);
730 goto err_qos_cmd_send;
732 dev_info(&hdev->pdev->dev, "\n");
733 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
734 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
735 dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
736 le16_to_cpu(rx_buf_cmd->buf_num[i]));
738 dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
739 le16_to_cpu(rx_buf_cmd->shared_buf));
/* RX common waterline (high/low). */
741 cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
742 hclge_cmd_setup_basic_desc(desc, cmd, true);
743 ret = hclge_cmd_send(&hdev->hw, desc, 1);
745 goto err_qos_cmd_send;
747 rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
748 dev_info(&hdev->pdev->dev, "\n");
749 dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
750 le16_to_cpu(rx_com_wl->com_wl.high),
751 le16_to_cpu(rx_com_wl->com_wl.low));
/* RX global packet counter (reuses the com_wl layout). */
753 cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
754 hclge_cmd_setup_basic_desc(desc, cmd, true);
755 ret = hclge_cmd_send(&hdev->hw, desc, 1);
757 goto err_qos_cmd_send;
759 rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
760 dev_info(&hdev->pdev->dev,
761 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
762 le16_to_cpu(rx_packet_cnt->com_wl.high),
763 le16_to_cpu(rx_packet_cnt->com_wl.low));
764 dev_info(&hdev->pdev->dev, "\n");
766 if (!hnae3_dev_dcb_supported(hdev)) {
767 dev_info(&hdev->pdev->dev,
768 "Only DCB-supported dev supports rx priv wl\n");
/* RX private waterlines: two chained BDs cover all TCs. */
771 cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
772 hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
773 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
774 hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
775 ret = hclge_cmd_send(&hdev->hw, desc, 2);
777 goto err_qos_cmd_send;
779 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
780 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
781 dev_info(&hdev->pdev->dev,
782 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
783 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
784 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
786 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
787 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
788 dev_info(&hdev->pdev->dev,
789 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
790 i + HCLGE_TC_NUM_ONE_DESC,
791 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
792 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
/* RX common thresholds: same two-BD layout as the waterlines. */
794 cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
795 hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
796 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
797 hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
798 ret = hclge_cmd_send(&hdev->hw, desc, 2);
800 goto err_qos_cmd_send;
802 dev_info(&hdev->pdev->dev, "\n");
803 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
804 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
805 dev_info(&hdev->pdev->dev,
806 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
807 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
808 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
810 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
811 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
812 dev_info(&hdev->pdev->dev,
813 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
814 i + HCLGE_TC_NUM_ONE_DESC,
815 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
816 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
/* Shared error path: report which opcode failed. */
820 dev_err(&hdev->pdev->dev,
821 "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
/* hclge_dbg_dump_mng_table - dump the MAC/ethertype management table.
 * @hdev: device instance
 *
 * Prints a fixed header, then iterates all HCLGE_DBG_MNG_TBL_MAX
 * entries: each is read with HCLGE_MAC_ETHERTYPE_IDX_RD and, when the
 * entry is valid (resp_code != 0 — note the "!resp_code" skip below),
 * its MAC/mask/VLAN/port fields are formatted into printf_buf and
 * logged as one row.
 */
824 static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
826 struct hclge_mac_ethertype_idx_rd_cmd *req0;
827 char printf_buf[HCLGE_DBG_BUF_LEN];
828 struct hclge_desc desc;
831 dev_info(&hdev->pdev->dev, "mng tab:\n");
832 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
834 "entry|mac_addr |mask|ether|mask|vlan|mask",
835 HCLGE_DBG_BUF_LEN - 1);
836 strncat(printf_buf + strlen(printf_buf),
837 "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
838 HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);
840 dev_info(&hdev->pdev->dev, "%s", printf_buf);
842 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
843 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
845 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
846 req0->index = cpu_to_le16(i);
848 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
850 dev_err(&hdev->pdev->dev,
851 "call hclge_cmd_send fail, ret = %d\n", ret);
/* Empty slot — skip. */
855 if (!req0->resp_code)
858 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
859 snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
860 "%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
861 le16_to_cpu(req0->index),
862 req0->mac_addr[0], req0->mac_addr[1],
863 req0->mac_addr[2], req0->mac_addr[3],
864 req0->mac_addr[4], req0->mac_addr[5]);
866 snprintf(printf_buf + strlen(printf_buf),
867 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
868 "%x |%04x |%x |%04x|%x |%02x |%02x |",
869 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
871 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
872 req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG,
873 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
874 req0->i_port_bitmap, req0->i_port_direction);
876 snprintf(printf_buf + strlen(printf_buf),
877 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
878 "%d |%d |%02d |%04d|%x\n",
879 !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B),
880 req0->egress_port & HCLGE_DBG_MNG_PF_ID,
881 (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
883 !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B));
885 dev_info(&hdev->pdev->dev, "%s", printf_buf);
/* hclge_dbg_fd_tcam_read - read and print one flow-director TCAM entry.
 * @hdev: device instance
 * @stage: FD stage (unused in the visible lines — TODO confirm)
 * @sel_x: true to read the X key plane, false for the Y plane
 * @loc: TCAM entry index (parameter list not fully visible here)
 *
 * Issues a three-descriptor chained HCLGE_OPC_FD_TCAM_OP read and
 * prints the 13 tcam_data words (2 + 6 + 5 across the three reply
 * structures) one 32-bit hex word per line.
 */
889 static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
892 struct hclge_fd_tcam_config_1_cmd *req1;
893 struct hclge_fd_tcam_config_2_cmd *req2;
894 struct hclge_fd_tcam_config_3_cmd *req3;
895 struct hclge_desc desc[3];
899 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
900 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
901 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
902 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
903 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
905 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
906 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
907 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
910 req1->xy_sel = sel_x ? 1 : 0;
911 req1->index = cpu_to_le32(loc);
913 ret = hclge_cmd_send(&hdev->hw, desc, 3);
917 dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
918 sel_x ? "x" : "y", loc);
920 /* tcam_data0 ~ tcam_data1 */
921 req = (u32 *)req1->tcam_data;
922 for (i = 0; i < 2; i++)
923 dev_info(&hdev->pdev->dev, "%08x\n", *req++);
925 /* tcam_data2 ~ tcam_data7 */
926 req = (u32 *)req2->tcam_data;
927 for (i = 0; i < 6; i++)
928 dev_info(&hdev->pdev->dev, "%08x\n", *req++);
930 /* tcam_data8 ~ tcam_data12 */
931 req = (u32 *)req3->tcam_data;
932 for (i = 0; i < 5; i++)
933 dev_info(&hdev->pdev->dev, "%08x\n", *req++);
/* hclge_dbg_fd_tcam - dump both key planes (x then y) of every stage-0
 * flow-director TCAM rule.
 */
936 static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
940 for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
941 hclge_dbg_fd_tcam_read(hdev, 0, true, i);
942 hclge_dbg_fd_tcam_read(hdev, 0, false, i);
/* hclge_dbg_dump_rst_info - dump reset statistics and hardware reset
 * state (non-static: called from elsewhere in the driver).
 * @hdev: device instance
 *
 * Prints the software reset counters from hdev->rst_stats, then reads
 * several misc-vector/reset status registers directly via
 * hclge_read_dev() and finally the hdev state bitmap.
 */
946 void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
948 dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
949 hdev->rst_stats.pf_rst_cnt);
950 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
951 hdev->rst_stats.flr_rst_cnt);
952 dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n",
953 hdev->rst_stats.global_rst_cnt);
954 dev_info(&hdev->pdev->dev, "IMP reset count: %u\n",
955 hdev->rst_stats.imp_rst_cnt);
956 dev_info(&hdev->pdev->dev, "reset done count: %u\n",
957 hdev->rst_stats.reset_done_cnt);
958 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
959 hdev->rst_stats.hw_reset_done_cnt);
960 dev_info(&hdev->pdev->dev, "reset count: %u\n",
961 hdev->rst_stats.reset_cnt);
962 dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
963 hdev->rst_stats.reset_fail_cnt);
964 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
965 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
966 dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n",
967 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
968 dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n",
969 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
970 dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n",
971 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG))
972 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
973 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
974 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
975 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
976 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
/* hclge_dbg_get_m7_stats_info - dump firmware (M7 core) statistics.
 * @hdev: device instance
 *
 * First queries the number of BDs the statistics occupy
 * (HCLGE_OPC_M7_STATS_BD), allocates that many descriptors, fetches the
 * data with HCLGE_OPC_M7_STATS_INFO and prints each descriptor's six
 * data words as two rows of three hex values.
 * NOTE(review): the kfree(desc_src) cleanup and the per-iteration
 * desc_tmp advance are not visible in this excerpt — verify against
 * the full source.
 */
979 static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
981 struct hclge_desc *desc_src, *desc_tmp;
982 struct hclge_get_m7_bd_cmd *req;
983 struct hclge_desc desc;
987 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);
989 req = (struct hclge_get_m7_bd_cmd *)desc.data;
990 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
992 dev_err(&hdev->pdev->dev,
993 "get firmware statistics bd number failed, ret = %d\n",
998 bd_num = le32_to_cpu(req->bd_num);
1000 buf_len = sizeof(struct hclge_desc) * bd_num;
1001 desc_src = kzalloc(buf_len, GFP_KERNEL);
1003 dev_err(&hdev->pdev->dev,
1004 "allocate desc for get_m7_stats failed\n");
1008 desc_tmp = desc_src;
1009 ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
1010 HCLGE_OPC_M7_STATS_INFO);
1013 dev_err(&hdev->pdev->dev,
1014 "get firmware statistics failed, ret = %d\n", ret);
1018 for (i = 0; i < bd_num; i++) {
1019 dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
1020 le32_to_cpu(desc_tmp->data[0]),
1021 le32_to_cpu(desc_tmp->data[1]),
1022 le32_to_cpu(desc_tmp->data[2]));
1023 dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
1024 le32_to_cpu(desc_tmp->data[3]),
1025 le32_to_cpu(desc_tmp->data[4]),
1026 le32_to_cpu(desc_tmp->data[5]));
/* Number of descriptors used per NCL_CONFIG query. */
1034 #define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
/* hclge_ncl_config_data_print - print one batch of NCL_CONFIG words.
 * @hdev: device instance
 * @desc: the HCLGE_CMD_NCL_CONFIG_BD_NUM reply descriptors
 * @offset: in/out byte offset into the NCL_CONFIG file, advanced as
 *          words are printed
 * @length: in/out remaining byte count, decremented as words are printed
 *
 * Walks the descriptor data words (skipping data[0] of the first
 * descriptor, which echoed the request) and prints "offset | value"
 * rows until the descriptors or @length are exhausted.
 */
1036 static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
1037 struct hclge_desc *desc, int *offset,
1040 #define HCLGE_CMD_DATA_NUM 6
1045 for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
1046 for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
/* data[0] of the first BD carried the request header, not payload. */
1047 if (i == 0 && j == 0)
1050 dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
1052 le32_to_cpu(desc[i].data[j]));
1053 *offset += sizeof(u32);
1054 *length -= sizeof(u32);
1061 /* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
1062 * @hdev: pointer to struct hclge_dev
1063 * @cmd_buf: string that contains offset and length
 *
 * Parses hex "offset length" from @cmd_buf, validates the range against
 * HCLGE_MAX_NCL_CONFIG_OFFSET, then loops: each iteration requests up
 * to HCLGE_MAX_NCL_CONFIG_LENGTH bytes (length packed into the upper
 * 16 bits of data0, offset presumably in the lower bits — the data0
 * initialization line is not visible in this excerpt; verify) and
 * prints the words via hclge_ncl_config_data_print().
 */
1065 static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
1066 const char *cmd_buf)
1068 #define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
1069 #define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
1071 struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
1072 int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
1078 ret = sscanf(cmd_buf, "%x %x", &offset, &length);
1079 if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
1080 length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
1081 dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
1084 if (offset < 0 || length <= 0) {
1085 dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
1089 dev_info(&hdev->pdev->dev, "offset | data\n");
1091 while (length > 0) {
/* Cap each request at the maximum chunk the firmware returns. */
1093 if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
1094 data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
1096 data0 |= length << 16;
1097 ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
1098 HCLGE_OPC_QUERY_NCL_CONFIG);
1102 hclge_ncl_config_data_print(hdev, desc, &offset, &length);
1106 /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
1107 * @hdev: pointer to struct hclge_dev
1109 static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
1111 #define HCLGE_BILLION_NANO_SECONDS 1000000000
1113 struct hclge_mac_tnl_stats stats;
1114 unsigned long rem_nsec;
1116 dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n");
1118 while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
1119 rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
1120 dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
1121 (unsigned long)stats.time, rem_nsec / 1000,
1126 static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
1128 struct hclge_qs_shapping_cmd *shap_cfg_cmd;
1129 u8 ir_u, ir_b, ir_s, bs_b, bs_s;
1130 struct hclge_desc desc;
1134 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
1136 shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
1137 shap_cfg_cmd->qs_id = cpu_to_le16(qsid);
1139 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1141 dev_err(&hdev->pdev->dev,
1142 "qs%u failed to get tx_rate, ret=%d\n",
1147 shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
1148 ir_b = hclge_tm_get_field(shapping_para, IR_B);
1149 ir_u = hclge_tm_get_field(shapping_para, IR_U);
1150 ir_s = hclge_tm_get_field(shapping_para, IR_S);
1151 bs_b = hclge_tm_get_field(shapping_para, BS_B);
1152 bs_s = hclge_tm_get_field(shapping_para, BS_S);
1154 dev_info(&hdev->pdev->dev,
1155 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n",
1156 qsid, ir_b, ir_u, ir_s, bs_b, bs_s);
1159 static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
1161 struct hnae3_knic_private_info *kinfo;
1162 struct hclge_vport *vport;
1165 for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
1166 vport = &hdev->vport[vport_id];
1167 kinfo = &vport->nic.kinfo;
1169 dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);
1171 for (i = 0; i < kinfo->num_tc; i++) {
1172 u16 qsid = vport->qs_offset + i;
1174 hclge_dbg_dump_qs_shaper_single(hdev, qsid);
1179 static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
1180 const char *cmd_buf)
1182 #define HCLGE_MAX_QSET_NUM 1024
1187 ret = kstrtou16(cmd_buf, 0, &qsid);
1189 hclge_dbg_dump_qs_shaper_all(hdev);
1193 if (qsid >= HCLGE_MAX_QSET_NUM) {
1194 dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
1199 hclge_dbg_dump_qs_shaper_single(hdev, qsid);
1202 int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
1204 #define DUMP_REG "dump reg"
1205 #define DUMP_TM_MAP "dump tm map"
1207 struct hclge_vport *vport = hclge_get_vport(handle);
1208 struct hclge_dev *hdev = vport->back;
1210 if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
1211 hclge_dbg_fd_tcam(hdev);
1212 } else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
1213 hclge_dbg_dump_tc(hdev);
1214 } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
1215 hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
1216 } else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
1217 hclge_dbg_dump_tm(hdev);
1218 } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
1219 hclge_dbg_dump_qos_pause_cfg(hdev);
1220 } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
1221 hclge_dbg_dump_qos_pri_map(hdev);
1222 } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
1223 hclge_dbg_dump_qos_buf_cfg(hdev);
1224 } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
1225 hclge_dbg_dump_mng_table(hdev);
1226 } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
1227 hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
1228 } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
1229 hclge_dbg_dump_rst_info(hdev);
1230 } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
1231 hclge_dbg_get_m7_stats_info(hdev);
1232 } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
1233 hclge_dbg_dump_ncl_config(hdev,
1234 &cmd_buf[sizeof("dump ncl_config")]);
1235 } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
1236 hclge_dbg_dump_mac_tnl_status(hdev);
1237 } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
1238 hclge_dbg_dump_qs_shaper(hdev,
1239 &cmd_buf[sizeof("dump qs shaper")]);
1241 dev_info(&hdev->pdev->dev, "unknown command\n");