// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
        HCLGE_SHAPER_LVL_PRI    = 0,
        HCLGE_SHAPER_LVL_PG     = 1,
        HCLGE_SHAPER_LVL_PORT   = 2,
        HCLGE_SHAPER_LVL_QSET   = 3,
        HCLGE_SHAPER_LVL_CNT    = 4,
        HCLGE_SHAPER_LVL_VF     = 0,
        HCLGE_SHAPER_LVL_PF     = 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM    3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD    3

#define HCLGE_SHAPER_BS_U_DEF   5
#define HCLGE_SHAPER_BS_S_DEF   20

#define HCLGE_ETHER_MAX_RATE    100000

/* hclge_shaper_para_calc: calculate the ir parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
 * @ir_b: IR_B parameter of the IR shaper
 * @ir_u: IR_U parameter of the IR shaper
 * @ir_s: IR_S parameter of the IR shaper
 *
 * the formula:
 *
 *              IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *              Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
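/* A worked example of the formula (using the tick values defined below):
 * at port level (tick = 48), the default parameters ir_b = 126, ir_u = 0,
 * ir_s = 0 give IR = 126 * 8 / 48 * 1000 = 21000 Mbps; at priority level
 * (tick = 1536) the same parameters give roughly 656 Mbps.
 */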
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
                                  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
#define DIVISOR_CLK             (1000 * 8)
#define DIVISOR_IR_B_126        (126 * DIVISOR_CLK)

        const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
                6 * 256,        /* Priority level */
                6 * 32,         /* Priority group level */
                6 * 8,          /* Port level */
                6 * 256         /* Qset level */
        };
        u8 ir_u_calc = 0;
        u8 ir_s_calc = 0;
        u32 ir_calc;
        u32 tick;

        /* Calc tick */
        if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
            ir > HCLGE_ETHER_MAX_RATE)
                return -EINVAL;

        tick = tick_array[shaper_level];

        /* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
         * the formula is changed to:
         *              126 * 1 * 8
         * ir_calc = ---------------- * 1000
         *              tick * 1
         */
        ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

        if (ir_calc == ir) {
                *ir_b = 126;
                *ir_u = 0;
                *ir_s = 0;

                return 0;
        } else if (ir_calc > ir) {
                /* Increasing the denominator to select ir_s value */
                while (ir_calc > ir) {
                        ir_s_calc++;
                        ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
                }

                if (ir_calc == ir)
                        *ir_b = 126;
                else
                        *ir_b = (ir * tick * (1 << ir_s_calc) +
                                 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
        } else {
                /* Increasing the numerator to select ir_u value */
                u32 numerator;

                while (ir_calc < ir) {
                        ir_u_calc++;
                        numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
                        ir_calc = (numerator + (tick >> 1)) / tick;
                }

                if (ir_calc == ir) {
                        *ir_b = 126;
                } else {
                        u32 denominator = (DIVISOR_CLK * (1 << --ir_u_calc));
                        *ir_b = (ir * tick + (denominator >> 1)) / denominator;
                }
        }

        *ir_u = ir_u_calc;
        *ir_s = ir_s_calc;

        return 0;
}
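
/* Usage sketch (illustrative values, not taken from this file): to program
 * a 5000 Mbps limit at PG level, the callers below do roughly:
 *
 *      u8 ir_b, ir_u, ir_s;
 *      int ret = hclge_shaper_para_calc(5000, HCLGE_SHAPER_LVL_PG,
 *                                       &ir_b, &ir_u, &ir_s);
 *
 * and then pack the triple with hclge_tm_get_shapping_para() before
 * writing it to hardware via the *_shapping_cfg() helpers.
 */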

static int hclge_pfc_stats_get(struct hclge_dev *hdev,
                               enum hclge_opcode_type opcode, u64 *stats)
{
        struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
        int ret, i, j;

        if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
              opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
                return -EINVAL;

        for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
                hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
                desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        }

        hclge_cmd_setup_basic_desc(&desc[i], opcode, true);

        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
        if (ret)
                return ret;

        for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
                struct hclge_pfc_stats_cmd *pfc_stats =
                                (struct hclge_pfc_stats_cmd *)desc[i].data;

                for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
                        u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;

                        if (index < HCLGE_MAX_TC_NUM)
                                stats[index] =
                                        le64_to_cpu(pfc_stats->pkt_num[j]);
                }
        }
        return 0;
}

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

        desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
                (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
                                  u8 pfc_bitmap)
{
        struct hclge_desc desc;
        struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

        pfc->tx_rx_en_bitmap = tx_rx_bitmap;
        pfc->pri_en_bitmap = pfc_bitmap;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
                                 u8 pause_trans_gap, u16 pause_trans_time)
{
        struct hclge_cfg_pause_param_cmd *pause_param;
        struct hclge_desc desc;

        pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

        ether_addr_copy(pause_param->mac_addr, addr);
        ether_addr_copy(pause_param->mac_addr_extra, addr);
        pause_param->pause_trans_gap = pause_trans_gap;
        pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
        struct hclge_cfg_pause_param_cmd *pause_param;
        struct hclge_desc desc;
        u16 trans_time;
        u8 trans_gap;
        int ret;

        pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        trans_gap = pause_param->pause_trans_gap;
        trans_time = le16_to_cpu(pause_param->pause_trans_time);

        return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
        u8 tc;

        tc = hdev->tm_info.prio_tc[pri_id];

        if (tc >= hdev->tm_info.num_tc)
                return -EINVAL;

        /* The priority register has four bytes; the first byte covers
         * priority 0 and priority 1: the upper 4 bits stand for priority 1
         * and the lower 4 bits for priority 0, as below:
         * first byte:  | pri_1 | pri_0 |
         * second byte: | pri_3 | pri_2 |
         * third byte:  | pri_5 | pri_4 |
         * fourth byte: | pri_7 | pri_6 |
         */
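        /* For example (a hypothetical mapping), prio_tc = {0, 1, 2, 3, ...}
         * packs to pri[0] = 0x10, pri[1] = 0x32, and so on.
         */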
        pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

        return 0;
}

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
        struct hclge_desc desc;
        u8 *pri = (u8 *)desc.data;
        u8 pri_id;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

        for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
                ret = hclge_fill_pri_array(hdev, pri, pri_id);
                if (ret)
                        return ret;
        }

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u8 pg_id, u8 pri_bit_map)
{
        struct hclge_pg_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

        map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

        map->pg_id = pg_id;
        map->pri_bit_map = pri_bit_map;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u16 qs_id, u8 pri)
{
        struct hclge_qs_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

        map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

        map->qs_id = cpu_to_le16(qs_id);
        map->priority = pri;
        map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
                                    u16 q_id, u16 qs_id)
{
        struct hclge_nq_to_qs_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

        map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

        map->nq_id = cpu_to_le16(q_id);
        map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
                                  u8 dwrr)
{
        struct hclge_pg_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

        weight = (struct hclge_pg_weight_cmd *)desc.data;

        weight->pg_id = pg_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
                                   u8 dwrr)
{
        struct hclge_priority_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

        weight = (struct hclge_priority_weight_cmd *)desc.data;

        weight->pri_id = pri_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
                                  u8 dwrr)
{
        struct hclge_qs_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

        weight = (struct hclge_qs_weight_cmd *)desc.data;

        weight->qs_id = cpu_to_le16(qs_id);
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

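/* Pack the five shaper fields into one 32-bit register word. The exact bit
 * positions come from the hclge_tm_set_field() masks in hclge_tm.h (IR_B in
 * the low byte, with IR_U, IR_S, BS_B and BS_S above it).
 */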
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
                                      u8 bs_b, u8 bs_s)
{
        u32 shapping_para = 0;

        hclge_tm_set_field(shapping_para, IR_B, ir_b);
        hclge_tm_set_field(shapping_para, IR_U, ir_u);
        hclge_tm_set_field(shapping_para, IR_S, ir_s);
        hclge_tm_set_field(shapping_para, BS_B, bs_b);
        hclge_tm_set_field(shapping_para, BS_S, bs_s);

        return shapping_para;
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
                                    enum hclge_shap_bucket bucket, u8 pg_id,
                                    u32 shapping_para)
{
        struct hclge_pg_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
                 HCLGE_OPC_TM_PG_C_SHAPPING;
        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

        shap_cfg_cmd->pg_id = pg_id;

        shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
        struct hclge_port_shapping_cmd *shap_cfg_cmd;
        struct hclge_desc desc;
        u32 shapping_para = 0;
        u8 ir_u, ir_b, ir_s;
        int ret;

        ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
                                     HCLGE_SHAPER_LVL_PORT,
                                     &ir_b, &ir_u, &ir_s);
        if (ret)
                return ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
        shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

        shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
                                                   HCLGE_SHAPER_BS_U_DEF,
                                                   HCLGE_SHAPER_BS_S_DEF);

        shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
                                     enum hclge_shap_bucket bucket, u8 pri_id,
                                     u32 shapping_para)
{
        struct hclge_pri_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
                 HCLGE_OPC_TM_PRI_C_SHAPPING;

        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

        shap_cfg_cmd->pri_id = pri_id;

        shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

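/* The three *_schd_mode_cfg() helpers below share one descriptor layout:
 * desc.data[0] carries the entity id (PG, priority or qset) and
 * desc.data[1] selects DWRR arbitration (mask set) or strict priority
 * (zero).
 */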
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

        if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pg_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

        if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pri_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

        if (mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(qs_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
                              u32 bit_map)
{
        struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
                                   false);

        bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

        bp_to_qs_map_cmd->tc_id = tc;
        bp_to_qs_map_cmd->qs_group_id = grp_id;
        bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u16 max_rss_size;
        u8 i;

        /* TC configuration is shared by PF/VF in one port, only allow
         * one tc for VF for simplicity. VF's vport_id is non-zero.
         */
        kinfo->num_tc = vport->vport_id ? 1 :
                        min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
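        /* PF (vport 0) owns qsets 0..num_tc - 1; VF n (vport_id == n, n >= 1)
         * gets the single qset num_tc + n - 1, matching the one-TC-per-VF
         * rule above.
         */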
        vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
                                (vport->vport_id ? (vport->vport_id - 1) : 0);

        max_rss_size = min_t(u16, hdev->rss_size_max,
                             vport->alloc_tqps / kinfo->num_tc);

        /* Set to user value, no larger than max_rss_size. */
        if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
            kinfo->req_rss_size <= max_rss_size) {
                dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
                         kinfo->rss_size, kinfo->req_rss_size);
                kinfo->rss_size = kinfo->req_rss_size;
        } else if (kinfo->rss_size > max_rss_size ||
                   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
                /* Set to the maximum specification value (max_rss_size). */
                dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
                         kinfo->rss_size, max_rss_size);
                kinfo->rss_size = max_rss_size;
        }

        kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
        vport->dwrr = 100;  /* 100 percent as init */
        vport->alloc_rss_size = kinfo->rss_size;
        vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
                        kinfo->tc_info[i].enable = true;
                        kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
                        kinfo->tc_info[i].tqp_count = kinfo->rss_size;
                        kinfo->tc_info[i].tc = i;
                } else {
                        /* Set to default queue if TC is disabled */
                        kinfo->tc_info[i].enable = false;
                        kinfo->tc_info[i].tqp_offset = 0;
                        kinfo->tc_info[i].tqp_count = 1;
                        kinfo->tc_info[i].tc = 0;
                }
        }

        memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
               FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                hclge_tm_vport_tc_info_update(vport);

                vport++;
        }
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
        u8 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                hdev->tm_info.tc_info[i].tc_id = i;
                hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
                hdev->tm_info.tc_info[i].pgid = 0;
                hdev->tm_info.tc_info[i].bw_limit =
                        hdev->tm_info.pg_info[0].bw_limit;
        }

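        /* Default UP-to-TC map: priority i maps to TC i when TC i exists;
         * otherwise (e.g. priorities 4..7 with num_tc = 4) it falls back
         * to TC 0.
         */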
        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
                hdev->tm_info.prio_tc[i] =
                        (i >= hdev->tm_info.num_tc) ? 0 : i;

        /* DCB is enabled if we have more than 1 TC or pfc_en is
         * non-zero.
         */
        if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
                hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
        else
                hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT      100

        u8 i;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                int k;

                hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

                hdev->tm_info.pg_info[i].pg_id = i;
                hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

                hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

                if (i != 0)
                        continue;

                hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
                for (k = 0; k < hdev->tm_info.num_tc; k++)
                        hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
        }
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
        if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
                if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
                        dev_warn(&hdev->pdev->dev,
                                 "DCB is disabled, but the last mode is FC_PFC\n");

                hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
        } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
                /* fc_mode_last_time records the last fc_mode when
                 * DCB is enabled, so that fc_mode can be set to
                 * the correct value when DCB is disabled.
                 */
                hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
                hdev->tm_info.fc_mode = HCLGE_FC_PFC;
        }
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
        hclge_tm_pg_info_init(hdev);

        hclge_tm_tc_info_init(hdev);

        hclge_tm_vport_info_update(hdev);

        hclge_pfc_info_init(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg mapping */
                ret = hclge_tm_pg_to_pri_map_cfg(
                        hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
        u8 ir_u, ir_b, ir_s;
        u32 shaper_para;
        int ret;
        u32 i;

        /* Cfg pg schd */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* Pg to pri */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Calc shaper para */
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.pg_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_PG,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_C_BUCKET, i,
                                               shaper_para);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_P_BUCKET, i,
                                               shaper_para);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        /* cfg pg schd */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* pg to prio */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg dwrr */
                ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
                                   struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hnae3_queue **tqp = kinfo->tqp;
        struct hnae3_tc_info *v_tc_info;
        u32 i, j;
        int ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                v_tc_info = &kinfo->tc_info[i];
                for (j = 0; j < v_tc_info->tqp_count; j++) {
                        struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

                        ret = hclge_tm_q_to_qs_map_cfg(hdev,
                                                       hclge_get_queue_id(q),
                                                       vport->qs_offset + i);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

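/* Establish the qset -> priority mapping (per TC in TC-based mode, per VF
 * in VNET-based mode), then the queue -> qset mapping for every vport.
 */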
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i, k;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                /* Cfg qs -> pri mapping, one by one mapping */
                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        struct hnae3_knic_private_info *kinfo =
                                &vport[k].nic.kinfo;

                        for (i = 0; i < kinfo->num_tc; i++) {
                                ret = hclge_tm_qs_to_pri_map_cfg(
                                        hdev, vport[k].qs_offset + i, i);
                                if (ret)
                                        return ret;
                        }
                }
        } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
                /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
                for (k = 0; k < hdev->num_alloc_vport; k++)
                        for (i = 0; i < HNAE3_MAX_TC; i++) {
                                ret = hclge_tm_qs_to_pri_map_cfg(
                                        hdev, vport[k].qs_offset + i, k);
                                if (ret)
                                        return ret;
                        }
        } else {
                return -EINVAL;
        }

        /* Cfg q -> qs mapping */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_vport_q_to_qs_map(hdev, vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
        u8 ir_u, ir_b, ir_s;
        u32 shaper_para;
        int ret;
        u32 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.tc_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_PRI,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
                                                shaper_para);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
                                                shaper_para);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;
        u8 ir_u, ir_b, ir_s;
        u32 shaper_para;
        int ret;

        ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
                                     &ir_b, &ir_u, &ir_s);
        if (ret)
                return ret;

        shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                 HCLGE_SHAPER_BS_U_DEF,
                                                 HCLGE_SHAPER_BS_S_DEF);
        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
                                        vport->vport_id, shaper_para);
        if (ret)
                return ret;

        shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
                                                 HCLGE_SHAPER_BS_U_DEF,
                                                 HCLGE_SHAPER_BS_S_DEF);
        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
                                        vport->vport_id, shaper_para);
        if (ret)
                return ret;

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u8 ir_u, ir_b, ir_s;
        u32 i;
        int ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.tc_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_QSET,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        /* Need to config vport shaper */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
                if (ret)
                        return ret;

                ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        struct hclge_pg_info *pg_info;
        u8 dwrr;
        int ret;
        u32 i, k;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                pg_info =
                        &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                dwrr = pg_info->tc_dwrr[i];

                ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
                if (ret)
                        return ret;

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        ret = hclge_tm_qs_weight_cfg(
                                hdev, vport[k].qs_offset + i,
                                vport[k].dwrr);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT       1
#define DEFAULT_TC_OFFSET       14

        struct hclge_ets_tc_weight_cmd *ets_weight;
        struct hclge_desc desc;
        unsigned int i;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
        ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                struct hclge_pg_info *pg_info;

                ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

                if (!(hdev->hw_tc_map & BIT(i)))
                        continue;

                pg_info =
                        &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
        }

        ets_weight->weight_offset = DEFAULT_TC_OFFSET;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        /* Vf dwrr */
        ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
        if (ret)
                return ret;

        /* Qset dwrr */
        for (i = 0; i < kinfo->num_tc; i++) {
                ret = hclge_tm_qs_weight_cfg(
                        hdev, vport->qs_offset + i,
                        hdev->tm_info.pg_info[0].tc_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;

                if (!hnae3_dev_dcb_supported(hdev))
                        return 0;

                ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
                if (ret == -EOPNOTSUPP) {
                        dev_warn(&hdev->pdev->dev,
                                 "fw %08x doesn't support ets tc weight cmd\n",
                                 hdev->fw_version);
                        ret = 0;
                }

                return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_up_to_tc_map(hdev);
        if (ret)
                return ret;

        ret = hclge_tm_pg_to_pri_map(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_port_shaper_cfg(hdev);
        if (ret)
                return ret;

        ret = hclge_tm_pg_shaper_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_pg_dwrr_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
        int ret;
        u8 i;

        /* Only configured in TC-based scheduler mode */
        if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        if (vport->vport_id >= HNAE3_MAX_TC)
                return -EINVAL;

        ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
        if (ret)
                return ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

                ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
                                                sch_mode);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u8 i, k;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                for (i = 0; i < hdev->tm_info.num_tc; i++) {
                        ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
                        if (ret)
                                return ret;

                        for (k = 0; k < hdev->num_alloc_vport; k++) {
                                ret = hclge_tm_qs_schd_mode_cfg(
                                        hdev, vport[k].qs_offset + i,
                                        HCLGE_SCH_MODE_DWRR);
                                if (ret)
                                        return ret;
                        }
                }
        } else {
                for (i = 0; i < hdev->num_alloc_vport; i++) {
                        ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
                        if (ret)
                                return ret;

                        vport++;
                }
        }

        return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
        int ret;

        /* Cfg tm mapping */
        ret = hclge_tm_map_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg tm shaper */
        ret = hclge_tm_shaper_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg dwrr */
        ret = hclge_tm_dwrr_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg schd mode for each level schd */
        return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        return hclge_pause_param_cfg(hdev, mac->mac_addr,
                                     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
                                     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
        u8 enable_bitmap = 0;

        if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
                enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
                                HCLGE_RX_MAC_PAUSE_EN_MSK;

        return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
                                      hdev->tm_info.pfc_en);
}

/* Each TC has 1024 queue sets to backpressure; they divide into
 * 32 groups, and each group contains 32 queue sets, which can be
 * represented by a u32 bitmap.
 */
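/* For example (assuming the 5-bit group/sub-group split implied by
 * HCLGE_BP_GRP_NUM = 32): qs_id = 40 decomposes into grp = 40 / 32 = 1
 * and sub_grp = 40 % 32 = 8, so bit 8 is set in group 1's bitmap.
 */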
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
        int i;

        for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
                u32 qs_bitmap = 0;
                int k, ret;

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        struct hclge_vport *vport = &hdev->vport[k];
                        u16 qs_id = vport->qs_offset + tc;
                        u8 grp, sub_grp;

                        grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
                                              HCLGE_BP_GRP_ID_S);
                        sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
                                                  HCLGE_BP_SUB_GRP_ID_S);
                        if (i == grp)
                                qs_bitmap |= (1 << sub_grp);
                }

                ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
        bool tx_en, rx_en;

        switch (hdev->tm_info.fc_mode) {
        case HCLGE_FC_NONE:
                tx_en = false;
                rx_en = false;
                break;
        case HCLGE_FC_RX_PAUSE:
                tx_en = false;
                rx_en = true;
                break;
        case HCLGE_FC_TX_PAUSE:
                tx_en = true;
                rx_en = false;
                break;
        case HCLGE_FC_FULL:
                tx_en = true;
                rx_en = true;
                break;
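        /* PFC supersedes link-level MAC pause: both directions stay off
         * here, and per-priority pause is enabled separately in
         * hclge_pfc_setup_hw().
         */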
        case HCLGE_FC_PFC:
                tx_en = false;
                rx_en = false;
                break;
        default:
                tx_en = true;
                rx_en = true;
        }

        return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
        int ret = 0;
        int i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_bp_setup_hw(hdev, i);
                if (ret)
                        return ret;
        }

        return ret;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
        int ret;

        ret = hclge_pause_param_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_mac_pause_setup_hw(hdev);
        if (ret)
                return ret;

        /* Only DCB-supported devices support qset back pressure and the
         * pfc command.
         */
        if (!hnae3_dev_dcb_supported(hdev))
                return 0;

        /* GE MAC does not support PFC; when the driver is initializing
         * and the MAC is in GE mode, ignore the error here, otherwise
         * initialization will fail.
         */
        ret = hclge_pfc_setup_hw(hdev);
        if (init && ret == -EOPNOTSUPP) {
                dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
        } else if (ret) {
                dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
                        ret);
                return ret;
        }

        return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
        struct hclge_vport *vport = hdev->vport;
        struct hnae3_knic_private_info *kinfo;
        u32 i, k;

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
                hdev->tm_info.prio_tc[i] = prio_tc[i];

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        kinfo = &vport[k].nic.kinfo;
                        kinfo->prio_tc[i] = prio_tc[i];
                }
        }
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
        u8 bit_map = 0;
        u8 i;

        hdev->tm_info.num_tc = num_tc;

        for (i = 0; i < hdev->tm_info.num_tc; i++)
                bit_map |= BIT(i);

        if (!bit_map) {
                bit_map = 1;
                hdev->tm_info.num_tc = 1;
        }

        hdev->hw_tc_map = bit_map;

        hclge_tm_schd_info_init(hdev);
}

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
        /* DCB is enabled if we have more than 1 TC or pfc_en is
         * non-zero.
         */
        if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
                hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
        else
                hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

        hclge_pfc_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
        int ret;

        if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
            (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
                return -ENOTSUPP;

        ret = hclge_tm_schd_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_pause_setup_hw(hdev, init);
        if (ret)
                return ret;

        return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
        /* fc_mode is HCLGE_FC_FULL on reset */
        hdev->tm_info.fc_mode = HCLGE_FC_FULL;
        hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
            hdev->tm_info.num_pg != 1)
                return -EINVAL;

        hclge_tm_schd_info_init(hdev);

        return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;

        hclge_tm_vport_tc_info_update(vport);

        ret = hclge_vport_q_to_qs_map(hdev, vport);
        if (ret)
                return ret;

        if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
                return 0;

        return hclge_tm_bp_setup(hdev);
}