]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
Linux 5.6-rc7
[linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_buffers.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9 #include <linux/netlink.h>
10
11 #include "spectrum.h"
12 #include "core.h"
13 #include "port.h"
14 #include "reg.h"
15
/* Software shadow of one shared buffer pool's configuration (SBPR). */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;	/* static or dynamic thresholding */
	u32 size;			/* pool size (cells once programmed) */
	/* NOTE(review): freeze flags are consumed outside this chunk;
	 * presumably they lock mode/size against devlink changes — confirm.
	 */
	u8 freeze_mode:1,
	   freeze_size:1;
};
22
/* An occupancy sample: current value and maximum observed (watermark). */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};
27
/* Shadow of one CM — a per-{port, PG/TC} quota binding a traffic class
 * to a pool, plus its last occupancy sample.
 */
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;			/* index into sb_vals->pool_dess */
	struct mlxsw_cp_sb_occ occ;
	/* NOTE(review): freeze flags are consumed outside this chunk;
	 * presumably they lock pool/threshold against devlink — confirm.
	 */
	u8 freeze_pool:1,
	   freeze_thresh:1;
};
36
/* Magic pool/quota sizes: INFI requests an "infinite" size (programmed via
 * the register's infi bit), REST means "all cells left over in this
 * direction" — resolved by mlxsw_sp_sb_prs_init().
 */
#define MLXSW_SP_SB_INFI -1U
#define MLXSW_SP_SB_REST -2U
39
/* Shadow of one PM — a per-{port, pool} quota, plus its last occupancy
 * sample (refreshed by the SBPM bulk queries below).
 */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};
45
/* One MM quota entry (consumed via sb_vals->mms; the programming code is
 * outside this chunk). Carries min/max and the pool it draws from.
 */
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;		/* index into sb_vals->pool_dess */
};
51
/* Descriptor of a hardware pool: direction plus the pool number within
 * that direction. Logical pool indices map to these via *_sb_pool_dess.
 */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};
56
/* Well-known logical pool indices (positions in *_sb_pool_dess below). */
#define MLXSW_SP_SB_POOL_ING            0
#define MLXSW_SP_SB_POOL_EGR            4
#define MLXSW_SP_SB_POOL_EGR_MC         8
#define MLXSW_SP_SB_POOL_ING_CPU        9
#define MLXSW_SP_SB_POOL_EGR_CPU        10
62
/* Spectrum-1 logical-pool -> HW-pool mapping. Array position is the
 * pool_index used throughout (see MLXSW_SP_SB_POOL_*); the multicast
 * logical pool (index 8) maps to egress HW pool 15.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
76
/* Spectrum-2 logical-pool -> HW-pool mapping; identical layout to the
 * Spectrum-1 table above.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
90
/* Number of traffic classes with CM shadows per direction. */
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

/* Per-port shared buffer state: CM shadows per ingress/egress TC and a
 * dynamically allocated array of PM shadows (one per logical pool).
 */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm *pms;	/* pool_count entries */
};
99
/* Device-wide shared buffer state. */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;	/* per-pool shadows, pool_count entries */
	struct mlxsw_sp_sb_port *ports;	/* per-port state, max_ports entries */
	u32 cell_size;			/* bytes per buffer cell */
	u32 max_headroom_cells;		/* headroom budget, in cells */
	u64 sb_size;			/* total shared buffer size, in bytes */
};
107
/* ASIC-generation-specific shared buffer defaults; each array is paired
 * with its *_count (pool_count sizes pool_dess, pms, pms_cpu and prs).
 */
struct mlxsw_sp_sb_vals {
	unsigned int pool_count;
	const struct mlxsw_sp_sb_pool_des *pool_dess;	/* logical->HW pools */
	const struct mlxsw_sp_sb_pm *pms;		/* front-panel port PMs */
	const struct mlxsw_sp_sb_pm *pms_cpu;		/* CPU port PMs */
	const struct mlxsw_sp_sb_pr *prs;		/* pool profiles */
	const struct mlxsw_sp_sb_mm *mms;
	const struct mlxsw_sp_sb_cm *cms_ingress;
	const struct mlxsw_sp_sb_cm *cms_egress;
	const struct mlxsw_sp_sb_cm *cms_cpu;		/* CPU port egress CMs */
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};
123
124 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
125 {
126         return mlxsw_sp->sb->cell_size * cells;
127 }
128
129 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
130 {
131         return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
132 }
133
134 u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
135 {
136         return mlxsw_sp->sb->max_headroom_cells;
137 }
138
139 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
140                                                  u16 pool_index)
141 {
142         return &mlxsw_sp->sb->prs[pool_index];
143 }
144
145 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
146 {
147         if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
148                 return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
149         else
150                 return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
151 }
152
153 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
154                                                  u8 local_port, u8 pg_buff,
155                                                  enum mlxsw_reg_sbxx_dir dir)
156 {
157         struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
158
159         WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
160         if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
161                 return &sb_port->ing_cms[pg_buff];
162         else
163                 return &sb_port->eg_cms[pg_buff];
164 }
165
166 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
167                                                  u8 local_port, u16 pool_index)
168 {
169         return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
170 }
171
/* Program pool @pool_index through the SBPR register; on success mirror
 * the new mode/size into the local shadow. With @infi_size the shadow
 * records the whole shared buffer converted to cells instead of @size.
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	/* Update the shadow only after HW accepted the write. */
	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}
195
/* Program one CM through the SBCM register and refresh the shadow. The
 * shadow is skipped when no CM exists for this {pg_buff, dir} (e.g. the
 * register-only egress entry beyond MLXSW_SP_SB_EG_TC_COUNT).
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		/* For an infinite quota, record the whole shared buffer
		 * (in cells) as the effective maximum.
		 */
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}
225
/* Program one per-port pool quota through the SBPM register and, on
 * success, mirror min/max into the PM shadow.
 */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
246
/* Queue a clear-on-read SBPM query onto @bulk_list to reset the max
 * occupancy watermark. CPU-port ingress pools are silently skipped —
 * NOTE(review): presumably not tracked for the CPU port; confirm.
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	/* clr bit set: reading also clears the watermark; no callback. */
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}
263
/* Bulk-query completion callback: @cb_priv carries the PM shadow whose
 * occupancy sample is refreshed from the SBPM response payload.
 */
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
272
/* Queue an SBPM occupancy query onto @bulk_list; the callback stores the
 * result into the PM shadow. CPU-port ingress pools are skipped, matching
 * mlxsw_sp_sb_pm_occ_clear().
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
293
/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
#define MLXSW_SP_PB_HEADROOM 25632
#define MLXSW_SP_PB_UNUSED 8

/* Configure the port's priority-group buffers (PBMC): buffer 0 gets the
 * lossy headroom scaled by port width, buffer 9 is sized to the max MTU,
 * buffer 8 is left unused, and the port shared buffer is zeroed.
 */
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	const u32 pbs[] = {
		[0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
		[9] = MLXSW_PORT_MAX_MTU,
	};
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < ARRAY_SIZE(pbs); i++) {
		/* NOTE(review): u16 narrows the u32 cell count — assumes
		 * the result always fits; confirm for max width/cell size.
		 */
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);

		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
321
322 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
323 {
324         char pptb_pl[MLXSW_REG_PPTB_LEN];
325         int i;
326
327         mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
328         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
329                 mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
330         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
331                                pptb_pl);
332 }
333
/* Initialize the port's headroom: PG buffers first, then the
 * priority-to-buffer mapping.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;

	err = mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
	return err;
}
343
344 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
345                                  struct mlxsw_sp_sb_port *sb_port)
346 {
347         struct mlxsw_sp_sb_pm *pms;
348
349         pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
350                       GFP_KERNEL);
351         if (!pms)
352                 return -ENOMEM;
353         sb_port->pms = pms;
354         return 0;
355 }
356
/* Release the per-port PM shadow array (kfree(NULL) is a no-op). */
static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
{
	kfree(sb_port->pms);
}
361
/* Allocate per-port state for all ports plus the pool shadow array; on
 * failure, unwind whatever was initialized (goto-based cleanup).
 */
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_sb_pr *prs;
	int i;
	int err;

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;

	/* One pool shadow per logical pool. */
	prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
		      GFP_KERNEL);
	if (!prs) {
		err = -ENOMEM;
		goto err_alloc_prs;
	}
	mlxsw_sp->sb->prs = prs;

	for (i = 0; i < max_ports; i++) {
		err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
		if (err)
			goto err_sb_port_init;
	}

	return 0;

err_sb_port_init:
	/* Only ports [0, i) were initialized; free them in reverse. */
	for (i--; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
	kfree(mlxsw_sp->sb->ports);
	return err;
}
399
400 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
401 {
402         int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
403         int i;
404
405         for (i = max_ports - 1; i >= 0; i--)
406                 mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
407         kfree(mlxsw_sp->sb->prs);
408         kfree(mlxsw_sp->sb->ports);
409 }
410
/* Initializers for struct mlxsw_sp_sb_pr. The _EXT variant also sets the
 * freeze flags (NOTE(review): consumed outside this chunk; presumably
 * they lock mode/size against devlink changes — confirm).
 */
#define MLXSW_SP_SB_PR(_mode, _size)    \
	{                               \
		.mode = _mode,          \
		.size = _size,          \
	}

#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size)    \
	{                                                               \
		.mode = _mode,                                          \
		.size = _size,                                          \
		.freeze_mode = _freeze_mode,                            \
		.freeze_size = _freeze_size,                            \
	}
424
#define MLXSW_SP1_SB_PR_CPU_SIZE        (256 * 1000)

/* Order according to mlxsw_sp1_sb_pool_dess. The default ingress/egress
 * pools (0 and 4) split the leftover cells per direction (SB_REST), the
 * multicast pool (8) is static/infinite, and the CPU pools (9, 10) get a
 * fixed 256,000 bytes.
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
445
#define MLXSW_SP2_SB_PR_CPU_SIZE        (256 * 1000)

/* Order according to mlxsw_sp2_sb_pool_dess. Same layout as Spectrum-1,
 * but the unused zero-size pools are STATIC rather than DYNAMIC.
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};
466
/* Program all pools from the @prs table. Explicitly-sized pools are
 * converted from bytes to cells; whatever cells remain per direction are
 * then handed to the SB_REST pools; SB_INFI pools get the infi bit.
 */
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_sb_pr *prs,
				const struct mlxsw_sp_sb_pool_des *pool_dess,
				size_t prs_len)
{
	/* Round down, unlike mlxsw_sp_bytes_cells(). */
	u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
	/* NOTE(review): indexed by dir — assumes the INGRESS/EGRESS enum
	 * values are 0 and 1; confirm against mlxsw_reg_sbxx_dir.
	 */
	u32 rest_cells[2] = {sb_cells, sb_cells};
	int i;
	int err;

	/* Calculate how much space to give to the "REST" pools in either
	 * direction.
	 */
	for (i = 0; i < prs_len; i++) {
		enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
			continue;

		size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
		/* Oversubscription only warns; the pool is then ignored
		 * for the REST calculation rather than failing init.
		 */
		if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
			continue;

		rest_cells[dir] -= size_cells;
	}

	for (i = 0; i < prs_len; i++) {
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   0, true);
		} else if (size == MLXSW_SP_SB_REST) {
			size_cells = rest_cells[pool_dess[i].dir];
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		} else {
			size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		}
		if (err)
			return err;
	}
	return 0;
}
517
/* Initializers for struct mlxsw_sp_sb_cm: generic, and shorthand variants
 * bound to the default ingress/egress pools. The EGR_MC variant also
 * freezes the pool/threshold fields.
 */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)     \
	{                                               \
		.min_buff = _min_buff,                  \
		.max_buff = _max_buff,                  \
		.pool_index = _pool,                    \
	}

#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff)        \
	{                                               \
		.min_buff = _min_buff,                  \
		.max_buff = _max_buff,                  \
		.pool_index = MLXSW_SP_SB_POOL_ING,     \
	}

#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff)        \
	{                                               \
		.min_buff = _min_buff,                  \
		.max_buff = _max_buff,                  \
		.pool_index = MLXSW_SP_SB_POOL_EGR,     \
	}

#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff)     \
	{                                               \
		.min_buff = _min_buff,                  \
		.max_buff = _max_buff,                  \
		.pool_index = MLXSW_SP_SB_POOL_EGR_MC,  \
		.freeze_pool = true,                    \
		.freeze_thresh = true,                  \
	}
547
/* Spectrum-1 ingress CMs, indexed by PG: PGs 0-7 from the default ingress
 * pool, placeholder for nonexistent PG 8, and PG 9 from the CPU ingress
 * pool.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(10000, 8),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
560
/* Spectrum-2 ingress CMs; same layout as Spectrum-1, different PG 0
 * values.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(0, 7),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
573
/* Spectrum-1 egress CMs, indexed by TC: 0-7 from the default egress pool,
 * 8-15 infinite quota from the MC pool (frozen), and entry 16 which is
 * written to HW but has no shadow (beyond MLXSW_SP_SB_EG_TC_COUNT).
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
593
/* Spectrum-2 egress CMs; same layout as Spectrum-1, different unicast
 * thresholds.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
613
/* Default (zero-quota) egress CM for the CPU port. */
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)

/* CPU-port egress CMs, indexed by TC. TCs 1-5 and 7 get a 1000-byte
 * minimum with a dynamic maximum; the rest get the zero quota.
 * NOTE(review): presumably these are the TCs carrying host-bound control
 * traffic — confirm against the trap group configuration.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};
650
651 static bool
652 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
653 {
654         struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
655
656         return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
657 }
658
/* Program a port's CMs for one direction from table @cms. Ingress PG 8
 * does not exist and is skipped; entries whose pool points the other way
 * trigger a WARN and are skipped.
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;
		u32 max_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		max_buff = cm->max_buff;
		if (max_buff == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, 0,
						   true, cm->pool_index);
		} else {
			/* Static pools take max_buff in bytes (converted
			 * to cells); dynamic pools take it as-is —
			 * NOTE(review): presumably a dynamic-threshold
			 * index, see MLXSW_REG_SBXX_DYN_MAX_BUFF_*;
			 * confirm.
			 */
			if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
						       cm->pool_index))
				max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
								max_buff);
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, max_buff,
						   false, cm->pool_index);
		}
		if (err)
			return err;
	}
	return 0;
}
699
700 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
701 {
702         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
703         int err;
704
705         err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
706                                      mlxsw_sp_port->local_port,
707                                      MLXSW_REG_SBXX_DIR_INGRESS,
708                                      mlxsw_sp->sb_vals->cms_ingress,
709                                      mlxsw_sp->sb_vals->cms_ingress_count);
710         if (err)
711                 return err;
712         return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
713                                       mlxsw_sp_port->local_port,
714                                       MLXSW_REG_SBXX_DIR_EGRESS,
715                                       mlxsw_sp->sb_vals->cms_egress,
716                                       mlxsw_sp->sb_vals->cms_egress_count);
717 }
718
719 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
720 {
721         return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
722                                       mlxsw_sp->sb_vals->cms_cpu,
723                                       mlxsw_sp->sb_vals->cms_cpu_count);
724 }
725
/* Initializer for struct mlxsw_sp_sb_pm. */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)    \
	{                                       \
		.min_buff = _min_buff,          \
		.max_buff = _max_buff,          \
	}

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(10000, 90000),	/* multicast pool */
	MLXSW_SP_SB_PM(0, 8),   /* 50% occupancy */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
746
/* Order according to mlxsw_sp2_sb_pool_dess. The zero maxima here pair
 * with the STATIC zero-size pools of mlxsw_sp2_sb_prs.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(10000, 90000),	/* multicast pool */
	MLXSW_SP_SB_PM(0, 8),   /* 50% occupancy */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
761
/* Order according to mlxsw_sp*_sb_pool_dess. The CPU port only uses the
 * multicast pool (byte quota) and its own egress pool (dynamic max).
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 90000),	/* multicast pool */
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
776
/* Program a port's per-pool quotas from table @pms. @skip_ingress leaves
 * ingress pools untouched (used for the CPU port, matching the skips in
 * the occupancy query/clear helpers above).
 */
static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				const struct mlxsw_sp_sb_pm *pms,
				bool skip_ingress)
{
	int i, err;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
		const struct mlxsw_sp_sb_pm *pm = &pms[i];
		const struct mlxsw_sp_sb_pool_des *des;
		u32 max_buff;
		u32 min_buff;

		des = &mlxsw_sp->sb_vals->pool_dess[i];
		if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue;

		/* min_buff is always in bytes; max_buff only for static
		 * pools (dynamic pools pass it through unconverted).
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
		max_buff = pm->max_buff;
		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
					   max_buff);
		if (err)
			return err;
	}
	return 0;
}
804
805 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
806 {
807         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
808
809         return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
810                                     mlxsw_sp->sb_vals->pms, false);
811 }
812
813 static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
814 {
815         return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
816                                     true);
817 }
818
/* Initializer for an SBMM table entry; every entry is bound to the
 * default egress pool.
 */
#define MLXSW_SP_SB_MM(_min_buff, _max_buff)		\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}
825
/* One entry per SBMM index programmed by mlxsw_sp_sb_mms_init(); all
 * entries use a dynamic threshold of 6 on the default egress pool.
 */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
};
843
/* Program one SBMM register record for each mlxsw_sp_sb_mm entry,
 * binding it to its configured pool with the per-entry quotas.
 * Returns 0 on success or a negative errno from the register write.
 */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp->sb_vals->mms[i];
		des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
		/* All pools used by sb_mm's are initialized using dynamic
		 * thresholds, therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    des->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
869
870 static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
871                                 u16 *p_ingress_len, u16 *p_egress_len)
872 {
873         int i;
874
875         for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
876                 if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
877                     MLXSW_REG_SBXX_DIR_INGRESS)
878                         (*p_ingress_len)++;
879                 else
880                         (*p_egress_len)++;
881         }
882
883         WARN(*p_egress_len == 0, "No egress pools\n");
884 }
885
/* Shared-buffer descriptor tables for Spectrum-1 ASICs; selected at
 * probe time via mlxsw_sp->sb_vals.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
	.pool_dess = mlxsw_sp1_sb_pool_dess,
	.pms = mlxsw_sp1_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp1_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp1_sb_cms_ingress,
	.cms_egress = mlxsw_sp1_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
901
/* Shared-buffer descriptor tables for Spectrum-2 ASICs; selected at
 * probe time via mlxsw_sp->sb_vals.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
	.pool_dess = mlxsw_sp2_sb_pool_dess,
	.pms = mlxsw_sp2_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp2_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp2_sb_cms_ingress,
	.cms_egress = mlxsw_sp2_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
917
/* Allocate and program the device shared buffer and register it with
 * devlink. Requires the CELL_SIZE, GUARANTEED_SHARED_BUFFER and
 * MAX_HEADROOM_SIZE device resources to be valid.
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound via the goto ladder below.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u32 max_headroom_size;
	u16 ing_pool_count = 0;
	u16 eg_pool_count = 0;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
		return -EIO;

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   GUARANTEED_SHARED_BUFFER);
	max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					       MAX_HEADROOM_SIZE);
	/* Round down, because this limit must not be overstepped. */
	mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
						mlxsw_sp->sb->cell_size;

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
				   mlxsw_sp->sb_vals->pool_dess,
				   mlxsw_sp->sb_vals->pool_count);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_pms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				  mlxsw_sp->sb->sb_size,
				  ing_pool_count,
				  eg_pool_count,
				  MLXSW_SP_SB_ING_TC_COUNT,
				  MLXSW_SP_SB_EG_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

	/* Only the ports array and the sb context need explicit
	 * cleanup; register writes leave no state to undo here.
	 */
err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}
985
/* Tear down shared-buffer state in reverse order of
 * mlxsw_sp_buffers_init(): unregister from devlink, release the
 * per-port occupancy state, then free the sb context.
 */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}
992
/* Per-port shared-buffer setup: headroom, per-{PG,TC} quotas and
 * per-pool quotas, in that order. Returns 0 or a negative errno.
 */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;

	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;

	return mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
}
1007
1008 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
1009                          unsigned int sb_index, u16 pool_index,
1010                          struct devlink_sb_pool_info *pool_info)
1011 {
1012         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1013         enum mlxsw_reg_sbxx_dir dir;
1014         struct mlxsw_sp_sb_pr *pr;
1015
1016         dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
1017         pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
1018         pool_info->pool_type = (enum devlink_sb_pool_type) dir;
1019         pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
1020         pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
1021         pool_info->cell_size = mlxsw_sp->sb->cell_size;
1022         return 0;
1023 }
1024
/* devlink op: resize a pool and/or change its threshold type (mode).
 * Rejects sizes above the guaranteed shared buffer and changes to
 * pools whose mode or size the driver declared frozen.
 */
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
	const struct mlxsw_sp_sb_pr *pr;
	enum mlxsw_reg_sbpr_mode mode;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	pr = &mlxsw_sp->sb_vals->prs[pool_index];

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
				      GUARANTEED_SHARED_BUFFER)) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
		return -EINVAL;
	}

	if (pr->freeze_mode && pr->mode != mode) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
		return -EINVAL;
	}

	if (pr->freeze_size && pr->size != size) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
		return -EINVAL;
	}

	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
				    pool_size, false);
}
1057
/* devlink thresholds for dynamic pools differ from the SBXX alpha
 * encoding by a fixed offset of 2.
 */
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

/* Convert a stored max_buff quota to its devlink representation:
 * offset alpha index for dynamic pools, bytes for static pools.
 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				     u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}
1069
/* Convert a devlink threshold to the value programmed into the device:
 * a range-checked alpha index for dynamic pools, cells for static
 * pools. Returns 0 and sets *p_max_buff on success; -EINVAL with an
 * extack message if a dynamic threshold is out of range.
 */
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				    u32 threshold, u32 *p_max_buff,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
			return -EINVAL;
		}
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}
1091
1092 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1093                               unsigned int sb_index, u16 pool_index,
1094                               u32 *p_threshold)
1095 {
1096         struct mlxsw_sp_port *mlxsw_sp_port =
1097                         mlxsw_core_port_driver_priv(mlxsw_core_port);
1098         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1099         u8 local_port = mlxsw_sp_port->local_port;
1100         struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1101                                                        pool_index);
1102
1103         *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
1104                                                  pm->max_buff);
1105         return 0;
1106 }
1107
/* devlink op: set a port's per-pool maximal occupancy quota. The CPU
 * port's quotas are fixed by the driver and may not be changed.
 */
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u32 max_buff;
	int err;

	if (local_port == MLXSW_PORT_CPU_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden");
		return -EINVAL;
	}

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff, extack);
	if (err)
		return err;

	/* min_buff of 0: only the maximal quota is managed here. */
	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
				    0, max_buff);
}
1132
/* devlink op: report which pool a port's {ingress PG, egress TC} is
 * bound to and its threshold in the devlink representation.
 */
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
						 cm->max_buff);
	*p_pool_index = cm->pool_index;
	return 0;
}
1152
/* devlink op: bind a port's {ingress PG, egress TC} to a pool with a
 * given threshold. Rejected for the CPU port, for a direction
 * mismatch between TC and pool, and for bindings the driver declared
 * frozen.
 */
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	const struct mlxsw_sp_sb_cm *cm;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u32 max_buff;
	int err;

	if (local_port == MLXSW_PORT_CPU_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden");
		return -EINVAL;
	}

	if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
		NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
		return -EINVAL;
	}

	/* Check the requested change against the driver defaults, which
	 * mark certain bindings and thresholds as immutable.
	 */
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
	else
		cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];

	if (cm->freeze_pool && cm->pool_index != pool_index) {
		NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
		return -EINVAL;
	}

	if (cm->freeze_thresh && cm->max_buff != threshold) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
		return -EINVAL;
	}

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff, extack);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
				    0, max_buff, false, pool_index);
}
1202
/* Maximum number of ports whose occupancy fits into one SBSR
 * transaction: each port contributes one record per ingress and per
 * egress traffic class.
 */
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))
1206
/* Batch context passed - packed into an unsigned long - from
 * mlxsw_sp_sb_occ_snapshot() to mlxsw_sp_sb_sr_occ_query_cb().
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;	/* number of ports covered by the batch */
	u8 local_port_1;	/* first local port of the batch */
};
1211
/* Completion callback for the batched SBSR occupancy query issued by
 * mlxsw_sp_sb_occ_snapshot(). Records are laid out as all ingress PG
 * records followed by all egress TC records, in the same port order
 * the query was packed in; both passes below must walk the ports
 * identically to keep rec_index aligned with that layout.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	/* The batch context was packed into the unsigned long cb_priv. */
	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port == MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			masked_count++;
			continue;
		}
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	/* Second pass: egress records for the same batch of ports. */
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
1260
/* devlink op: take an occupancy snapshot of all shared-buffer quotas.
 * Ports are queried in batches of at most MASKED_COUNT_MAX (one SBSR
 * transaction holds a bounded number of records); per-pool occupancy
 * (SBPM) is queried per port alongside each batch. All register
 * transactions are bulked and awaited together before returning.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	/* Issue the batched SBSR query; the results are unpacked by
	 * mlxsw_sp_sb_sr_occ_query_cb() once the bulk wait completes.
	 */
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1329
/* devlink op: clear the recorded maximal occupancy ("watermark") of
 * all shared-buffer quotas. Uses the same batching scheme as
 * mlxsw_sp_sb_occ_snapshot(), but SBSR is packed with the clear bit
 * set and no completion callback is needed.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
1390
1391 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1392                                   unsigned int sb_index, u16 pool_index,
1393                                   u32 *p_cur, u32 *p_max)
1394 {
1395         struct mlxsw_sp_port *mlxsw_sp_port =
1396                         mlxsw_core_port_driver_priv(mlxsw_core_port);
1397         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1398         u8 local_port = mlxsw_sp_port->local_port;
1399         struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1400                                                        pool_index);
1401
1402         *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1403         *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1404         return 0;
1405 }
1406
1407 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1408                                      unsigned int sb_index, u16 tc_index,
1409                                      enum devlink_sb_pool_type pool_type,
1410                                      u32 *p_cur, u32 *p_max)
1411 {
1412         struct mlxsw_sp_port *mlxsw_sp_port =
1413                         mlxsw_core_port_driver_priv(mlxsw_core_port);
1414         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1415         u8 local_port = mlxsw_sp_port->local_port;
1416         u8 pg_buff = tc_index;
1417         enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1418         struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1419                                                        pg_buff, dir);
1420
1421         *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1422         *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
1423         return 0;
1424 }