/* Source: drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c */
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10
11 #ifdef CONFIG_DEBUG_FS
12
13 #include <linux/fs.h>
14 #include <linux/debugfs.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17
18 #include "rvu_struct.h"
19 #include "rvu_reg.h"
20 #include "rvu.h"
21
22 #define DEBUGFS_DIR_NAME "octeontx2"
23
/* Placeholders so the FOPS macros below can be handed "NULL" for an
 * unused read or write hook: token pasting turns rvu_dbg_##NULL into
 * rvu_dbg_NULL, which expands to a plain NULL function pointer.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Declares an open() helper plus file_operations for a seq_file based
 * debugfs entry: reads go through single_open()/seq_read(), writes (if
 * any) through the supplied write_op handler.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}
40
/* Declares file_operations for a raw (non seq_file) debugfs entry;
 * simple_open() passes the inode's private data to read_op/write_op
 * via file->private_data.
 */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
48
49 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
50
51 /* Dumps current provisioning status of all RVU block LFs */
52 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
53                                           char __user *buffer,
54                                           size_t count, loff_t *ppos)
55 {
56         int index, off = 0, flag = 0, go_back = 0, off_prev;
57         struct rvu *rvu = filp->private_data;
58         int lf, pf, vf, pcifunc;
59         struct rvu_block block;
60         int bytes_not_copied;
61         int buf_size = 2048;
62         char *buf;
63
64         /* don't allow partial reads */
65         if (*ppos != 0)
66                 return 0;
67
68         buf = kzalloc(buf_size, GFP_KERNEL);
69         if (!buf)
70                 return -ENOSPC;
71         off +=  scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
72         for (index = 0; index < BLK_COUNT; index++)
73                 if (strlen(rvu->hw->block[index].name))
74                         off +=  scnprintf(&buf[off], buf_size - 1 - off,
75                                           "%*s\t", (index - 1) * 2,
76                                           rvu->hw->block[index].name);
77         off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
78         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
79                 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
80                         pcifunc = pf << 10 | vf;
81                         if (!pcifunc)
82                                 continue;
83
84                         if (vf) {
85                                 go_back = scnprintf(&buf[off],
86                                                     buf_size - 1 - off,
87                                                     "PF%d:VF%d\t\t", pf,
88                                                     vf - 1);
89                         } else {
90                                 go_back = scnprintf(&buf[off],
91                                                     buf_size - 1 - off,
92                                                     "PF%d\t\t", pf);
93                         }
94
95                         off += go_back;
96                         for (index = 0; index < BLKTYPE_MAX; index++) {
97                                 block = rvu->hw->block[index];
98                                 if (!strlen(block.name))
99                                         continue;
100                                 off_prev = off;
101                                 for (lf = 0; lf < block.lf.max; lf++) {
102                                         if (block.fn_map[lf] != pcifunc)
103                                                 continue;
104                                         flag = 1;
105                                         off += scnprintf(&buf[off], buf_size - 1
106                                                         - off, "%3d,", lf);
107                                 }
108                                 if (flag && off_prev != off)
109                                         off--;
110                                 else
111                                         go_back++;
112                                 off += scnprintf(&buf[off], buf_size - 1 - off,
113                                                 "\t");
114                         }
115                         if (!flag)
116                                 off -= go_back;
117                         else
118                                 flag = 0;
119                         off--;
120                         off +=  scnprintf(&buf[off], buf_size - 1 - off, "\n");
121                 }
122         }
123
124         bytes_not_copied = copy_to_user(buffer, buf, off);
125         kfree(buf);
126
127         if (bytes_not_copied)
128                 return -EFAULT;
129
130         *ppos = off;
131         return off;
132 }
133
134 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
135
136 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
137                                 u16 *pcifunc)
138 {
139         struct rvu_block *block;
140         struct rvu_hwinfo *hw;
141         int blkaddr;
142
143         blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
144         if (blkaddr < 0) {
145                 dev_warn(rvu->dev, "Invalid blktype\n");
146                 return false;
147         }
148
149         hw = rvu->hw;
150         block = &hw->block[blkaddr];
151
152         if (lf < 0 || lf >= block->lf.max) {
153                 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
154                          block->lf.max - 1);
155                 return false;
156         }
157
158         *pcifunc = block->fn_map[lf];
159         if (!*pcifunc) {
160                 dev_warn(rvu->dev,
161                          "This LF is not attached to any RVU PFFUNC\n");
162                 return false;
163         }
164         return true;
165 }
166
167 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
168 {
169         char *buf;
170
171         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
172         if (!buf)
173                 return;
174
175         if (!pfvf->aura_ctx) {
176                 seq_puts(m, "Aura context is not initialized\n");
177         } else {
178                 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
179                                         pfvf->aura_ctx->qsize);
180                 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
181                 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
182         }
183
184         if (!pfvf->pool_ctx) {
185                 seq_puts(m, "Pool context is not initialized\n");
186         } else {
187                 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
188                                         pfvf->pool_ctx->qsize);
189                 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
190                 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
191         }
192         kfree(buf);
193 }
194
195 /* The 'qsize' entry dumps current Aura/Pool context Qsize
196  * and each context's current enable/disable status in a bitmap.
197  */
198 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
199                                  int blktype)
200 {
201         void (*print_qsize)(struct seq_file *filp,
202                             struct rvu_pfvf *pfvf) = NULL;
203         struct rvu_pfvf *pfvf;
204         struct rvu *rvu;
205         int qsize_id;
206         u16 pcifunc;
207
208         rvu = filp->private;
209         switch (blktype) {
210         case BLKTYPE_NPA:
211                 qsize_id = rvu->rvu_dbg.npa_qsize_id;
212                 print_qsize = print_npa_qsize;
213                 break;
214
215         case BLKTYPE_NIX:
216                 qsize_id = rvu->rvu_dbg.nix_qsize_id;
217                 print_qsize = print_nix_qsize;
218                 break;
219
220         default:
221                 return -EINVAL;
222         }
223
224         if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
225                 return -EINVAL;
226
227         pfvf = rvu_get_pfvf(rvu, pcifunc);
228         print_qsize(filp, pfvf);
229
230         return 0;
231 }
232
233 static ssize_t rvu_dbg_qsize_write(struct file *filp,
234                                    const char __user *buffer, size_t count,
235                                    loff_t *ppos, int blktype)
236 {
237         char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
238         struct seq_file *seqfile = filp->private_data;
239         char *cmd_buf, *cmd_buf_tmp, *subtoken;
240         struct rvu *rvu = seqfile->private;
241         u16 pcifunc;
242         int ret, lf;
243
244         cmd_buf = memdup_user(buffer, count);
245         if (IS_ERR(cmd_buf))
246                 return -ENOMEM;
247
248         cmd_buf[count] = '\0';
249
250         cmd_buf_tmp = strchr(cmd_buf, '\n');
251         if (cmd_buf_tmp) {
252                 *cmd_buf_tmp = '\0';
253                 count = cmd_buf_tmp - cmd_buf + 1;
254         }
255
256         cmd_buf_tmp = cmd_buf;
257         subtoken = strsep(&cmd_buf, " ");
258         ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
259         if (cmd_buf)
260                 ret = -EINVAL;
261
262         if (!strncmp(subtoken, "help", 4) || ret < 0) {
263                 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
264                 goto qsize_write_done;
265         }
266
267         if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
268                 ret = -EINVAL;
269                 goto qsize_write_done;
270         }
271         if (blktype  == BLKTYPE_NPA)
272                 rvu->rvu_dbg.npa_qsize_id = lf;
273         else
274                 rvu->rvu_dbg.nix_qsize_id = lf;
275
276 qsize_write_done:
277         kfree(cmd_buf_tmp);
278         return ret ? ret : count;
279 }
280
281 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
282                                        const char __user *buffer,
283                                        size_t count, loff_t *ppos)
284 {
285         return rvu_dbg_qsize_write(filp, buffer, count, ppos,
286                                             BLKTYPE_NPA);
287 }
288
289 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
290 {
291         return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
292 }
293
294 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
295
/* Dumps given NPA Aura's context to the seq_file.
 * "Wn" labels the 64-bit word index of each field within the HW
 * aura context (struct npa_aura_s).
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	/* wide bitfields are cast to u64 to match the %llu specifier */
	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
}
337
/* Dumps given NPA Pool's context to the seq_file.
 * "Wn" labels the 64-bit word index of each field within the HW
 * pool context (struct npa_pool_s).
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	/* wide bitfield cast to u64 to match the %llu specifier */
	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
}
379
/* Reads aura/pool's ctx from admin queue and dumps it.
 * The LF, context id and "all" flag were previously selected through
 * the matching debugfs write handler (rvu_dbg_npa_ctx_write).
 * Returns 0 on success, -EINVAL on bad selection or AQ read failure.
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Fetch the selection saved by the write handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an AQ READ instruction for the owning PF/VF */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps every context [0, qsize); otherwise just [id, id+1) */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
460
461 static int write_npa_ctx(struct rvu *rvu, bool all,
462                          int npalf, int id, int ctype)
463 {
464         struct rvu_pfvf *pfvf;
465         int max_id = 0;
466         u16 pcifunc;
467
468         if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
469                 return -EINVAL;
470
471         pfvf = rvu_get_pfvf(rvu, pcifunc);
472
473         if (ctype == NPA_AQ_CTYPE_AURA) {
474                 if (!pfvf->aura_ctx) {
475                         dev_warn(rvu->dev, "Aura context is not initialized\n");
476                         return -EINVAL;
477                 }
478                 max_id = pfvf->aura_ctx->qsize;
479         } else if (ctype == NPA_AQ_CTYPE_POOL) {
480                 if (!pfvf->pool_ctx) {
481                         dev_warn(rvu->dev, "Pool context is not initialized\n");
482                         return -EINVAL;
483                 }
484                 max_id = pfvf->pool_ctx->qsize;
485         }
486
487         if (id < 0 || id >= max_id) {
488                 dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
489                          (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
490                         max_id - 1);
491                 return -EINVAL;
492         }
493
494         switch (ctype) {
495         case NPA_AQ_CTYPE_AURA:
496                 rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
497                 rvu->rvu_dbg.npa_aura_ctx.id = id;
498                 rvu->rvu_dbg.npa_aura_ctx.all = all;
499                 break;
500
501         case NPA_AQ_CTYPE_POOL:
502                 rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
503                 rvu->rvu_dbg.npa_pool_ctx.id = id;
504                 rvu->rvu_dbg.npa_pool_ctx.all = all;
505                 break;
506         default:
507                 return -EINVAL;
508         }
509         return 0;
510 }
511
/* Parse "<npalf> [<id>|all]" from the user buffer into *npalf, *id and
 * *all.  @cmd_buf must be writable and have room for *count + 1 bytes
 * (the NUL terminator is appended here); *count is trimmed to the
 * consumed length when a newline is present.  Returns 0 on success,
 * -EFAULT on copy failure, or a negative errno on malformed input.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		/* strip the trailing newline; report consumed length */
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	/* first token: the NPA LF number */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	/* optional second token: a context id or the literal "all" */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	/* anything left over means extra tokens -> malformed input */
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
549
550 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
551                                      const char __user *buffer,
552                                      size_t count, loff_t *ppos, int ctype)
553 {
554         char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
555                                         "aura" : "pool";
556         struct seq_file *seqfp = filp->private_data;
557         struct rvu *rvu = seqfp->private;
558         int npalf, id = 0, ret;
559         bool all = false;
560
561         if ((*ppos != 0) || !count)
562                 return -EINVAL;
563
564         cmd_buf = kzalloc(count + 1, GFP_KERNEL);
565         if (!cmd_buf)
566                 return count;
567         ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
568                                    &npalf, &id, &all);
569         if (ret < 0) {
570                 dev_info(rvu->dev,
571                          "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
572                          ctype_string, ctype_string);
573                 goto done;
574         } else {
575                 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
576         }
577 done:
578         kfree(cmd_buf);
579         return ret ? ret : count;
580 }
581
582 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
583                                           const char __user *buffer,
584                                           size_t count, loff_t *ppos)
585 {
586         return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
587                                      NPA_AQ_CTYPE_AURA);
588 }
589
590 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
591 {
592         return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
593 }
594
595 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
596
597 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
598                                           const char __user *buffer,
599                                           size_t count, loff_t *ppos)
600 {
601         return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
602                                      NPA_AQ_CTYPE_POOL);
603 }
604
605 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
606 {
607         return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
608 }
609
610 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
611
/* Dumps given nix_sq's context to the seq_file.
 * "Wn" labels the 64-bit word index of each field within the HW
 * SQ context (struct nix_sq_ctx_s).
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;

	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* SQB chain pointers */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	/* statistics counters; wide bitfields cast to u64 for %llu */
	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
675
/* Dumps given nix_rq's context to the seq_file.
 * "Wn" labels the 64-bit word index of each field within the HW
 * RQ context (struct nix_rq_ctx_s).
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	/* backpressure / drop level pairs */
	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* statistics counters; wide bitfields cast to u64 for %llu */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
737
/* Dumps given nix_cq's context to the seq_file.
 * "Wn" labels the 64-bit word index of each field within the HW
 * CQ context (struct nix_cq_ctx_s).
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
768
769 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
770                                          void *unused, int ctype)
771 {
772         void (*print_nix_ctx)(struct seq_file *filp,
773                               struct nix_aq_enq_rsp *rsp) = NULL;
774         struct rvu *rvu = filp->private;
775         struct nix_aq_enq_req aq_req;
776         struct nix_aq_enq_rsp rsp;
777         char *ctype_string = NULL;
778         int qidx, rc, max_id = 0;
779         struct rvu_pfvf *pfvf;
780         int nixlf, id, all;
781         u16 pcifunc;
782
783         switch (ctype) {
784         case NIX_AQ_CTYPE_CQ:
785                 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
786                 id = rvu->rvu_dbg.nix_cq_ctx.id;
787                 all = rvu->rvu_dbg.nix_cq_ctx.all;
788                 break;
789
790         case NIX_AQ_CTYPE_SQ:
791                 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
792                 id = rvu->rvu_dbg.nix_sq_ctx.id;
793                 all = rvu->rvu_dbg.nix_sq_ctx.all;
794                 break;
795
796         case NIX_AQ_CTYPE_RQ:
797                 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
798                 id = rvu->rvu_dbg.nix_rq_ctx.id;
799                 all = rvu->rvu_dbg.nix_rq_ctx.all;
800                 break;
801
802         default:
803                 return -EINVAL;
804         }
805
806         if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
807                 return -EINVAL;
808
809         pfvf = rvu_get_pfvf(rvu, pcifunc);
810         if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
811                 seq_puts(filp, "SQ context is not initialized\n");
812                 return -EINVAL;
813         } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
814                 seq_puts(filp, "RQ context is not initialized\n");
815                 return -EINVAL;
816         } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
817                 seq_puts(filp, "CQ context is not initialized\n");
818                 return -EINVAL;
819         }
820
821         if (ctype == NIX_AQ_CTYPE_SQ) {
822                 max_id = pfvf->sq_ctx->qsize;
823                 ctype_string = "sq";
824                 print_nix_ctx = print_nix_sq_ctx;
825         } else if (ctype == NIX_AQ_CTYPE_RQ) {
826                 max_id = pfvf->rq_ctx->qsize;
827                 ctype_string = "rq";
828                 print_nix_ctx = print_nix_rq_ctx;
829         } else if (ctype == NIX_AQ_CTYPE_CQ) {
830                 max_id = pfvf->cq_ctx->qsize;
831                 ctype_string = "cq";
832                 print_nix_ctx = print_nix_cq_ctx;
833         }
834
835         memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
836         aq_req.hdr.pcifunc = pcifunc;
837         aq_req.ctype = ctype;
838         aq_req.op = NIX_AQ_INSTOP_READ;
839         if (all)
840                 id = 0;
841         else
842                 max_id = id + 1;
843         for (qidx = id; qidx < max_id; qidx++) {
844                 aq_req.qidx = qidx;
845                 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
846                            ctype_string, nixlf, aq_req.qidx);
847                 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
848                 if (rc) {
849                         seq_puts(filp, "Failed to read the context\n");
850                         return -EINVAL;
851                 }
852                 print_nix_ctx(filp, &rsp);
853         }
854         return 0;
855 }
856
857 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
858                                int id, int ctype, char *ctype_string)
859 {
860         struct rvu_pfvf *pfvf;
861         int max_id = 0;
862         u16 pcifunc;
863
864         if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
865                 return -EINVAL;
866
867         pfvf = rvu_get_pfvf(rvu, pcifunc);
868
869         if (ctype == NIX_AQ_CTYPE_SQ) {
870                 if (!pfvf->sq_ctx) {
871                         dev_warn(rvu->dev, "SQ context is not initialized\n");
872                         return -EINVAL;
873                 }
874                 max_id = pfvf->sq_ctx->qsize;
875         } else if (ctype == NIX_AQ_CTYPE_RQ) {
876                 if (!pfvf->rq_ctx) {
877                         dev_warn(rvu->dev, "RQ context is not initialized\n");
878                         return -EINVAL;
879                 }
880                 max_id = pfvf->rq_ctx->qsize;
881         } else if (ctype == NIX_AQ_CTYPE_CQ) {
882                 if (!pfvf->cq_ctx) {
883                         dev_warn(rvu->dev, "CQ context is not initialized\n");
884                         return -EINVAL;
885                 }
886                 max_id = pfvf->cq_ctx->qsize;
887         }
888
889         if (id < 0 || id >= max_id) {
890                 dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
891                          ctype_string, max_id - 1);
892                 return -EINVAL;
893         }
894         switch (ctype) {
895         case NIX_AQ_CTYPE_CQ:
896                 rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
897                 rvu->rvu_dbg.nix_cq_ctx.id = id;
898                 rvu->rvu_dbg.nix_cq_ctx.all = all;
899                 break;
900
901         case NIX_AQ_CTYPE_SQ:
902                 rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
903                 rvu->rvu_dbg.nix_sq_ctx.id = id;
904                 rvu->rvu_dbg.nix_sq_ctx.all = all;
905                 break;
906
907         case NIX_AQ_CTYPE_RQ:
908                 rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
909                 rvu->rvu_dbg.nix_rq_ctx.id = id;
910                 rvu->rvu_dbg.nix_rq_ctx.all = all;
911                 break;
912         default:
913                 return -EINVAL;
914         }
915         return 0;
916 }
917
918 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
919                                            const char __user *buffer,
920                                            size_t count, loff_t *ppos,
921                                            int ctype)
922 {
923         struct seq_file *m = filp->private_data;
924         struct rvu *rvu = m->private;
925         char *cmd_buf, *ctype_string;
926         int nixlf, id = 0, ret;
927         bool all = false;
928
929         if ((*ppos != 0) || !count)
930                 return -EINVAL;
931
932         switch (ctype) {
933         case NIX_AQ_CTYPE_SQ:
934                 ctype_string = "sq";
935                 break;
936         case NIX_AQ_CTYPE_RQ:
937                 ctype_string = "rq";
938                 break;
939         case NIX_AQ_CTYPE_CQ:
940                 ctype_string = "cq";
941                 break;
942         default:
943                 return -EINVAL;
944         }
945
946         cmd_buf = kzalloc(count + 1, GFP_KERNEL);
947
948         if (!cmd_buf)
949                 return count;
950
951         ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
952                                    &nixlf, &id, &all);
953         if (ret < 0) {
954                 dev_info(rvu->dev,
955                          "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
956                          ctype_string, ctype_string);
957                 goto done;
958         } else {
959                 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
960                                           ctype_string);
961         }
962 done:
963         kfree(cmd_buf);
964         return ret ? ret : count;
965 }
966
967 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
968                                         const char __user *buffer,
969                                         size_t count, loff_t *ppos)
970 {
971         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
972                                             NIX_AQ_CTYPE_SQ);
973 }
974
975 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
976 {
977         return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
978 }
979
980 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
981
982 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
983                                         const char __user *buffer,
984                                         size_t count, loff_t *ppos)
985 {
986         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
987                                             NIX_AQ_CTYPE_RQ);
988 }
989
990 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
991 {
992         return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
993 }
994
995 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
996
997 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
998                                         const char __user *buffer,
999                                         size_t count, loff_t *ppos)
1000 {
1001         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1002                                             NIX_AQ_CTYPE_CQ);
1003 }
1004
1005 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
1006 {
1007         return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
1008 }
1009
1010 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1011
1012 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1013                                  unsigned long *bmap, char *qtype)
1014 {
1015         char *buf;
1016
1017         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1018         if (!buf)
1019                 return;
1020
1021         bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1022         seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1023         seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1024                    qtype, buf);
1025         kfree(buf);
1026 }
1027
1028 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1029 {
1030         if (!pfvf->cq_ctx)
1031                 seq_puts(filp, "cq context is not initialized\n");
1032         else
1033                 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1034                                      "cq");
1035
1036         if (!pfvf->rq_ctx)
1037                 seq_puts(filp, "rq context is not initialized\n");
1038         else
1039                 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1040                                      "rq");
1041
1042         if (!pfvf->sq_ctx)
1043                 seq_puts(filp, "sq context is not initialized\n");
1044         else
1045                 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1046                                      "sq");
1047 }
1048
1049 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
1050                                        const char __user *buffer,
1051                                        size_t count, loff_t *ppos)
1052 {
1053         return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1054                                    BLKTYPE_NIX);
1055 }
1056
1057 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
1058 {
1059         return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
1060 }
1061
1062 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1063
1064 static void rvu_dbg_nix_init(struct rvu *rvu)
1065 {
1066         const struct device *dev = &rvu->pdev->dev;
1067         struct dentry *pfile;
1068
1069         rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1070         if (!rvu->rvu_dbg.nix) {
1071                 dev_err(rvu->dev, "create debugfs dir failed for nix\n");
1072                 return;
1073         }
1074
1075         pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1076                                     &rvu_dbg_nix_sq_ctx_fops);
1077         if (!pfile)
1078                 goto create_failed;
1079
1080         pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1081                                     &rvu_dbg_nix_rq_ctx_fops);
1082         if (!pfile)
1083                 goto create_failed;
1084
1085         pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1086                                     &rvu_dbg_nix_cq_ctx_fops);
1087         if (!pfile)
1088                 goto create_failed;
1089
1090         pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1091                                     &rvu_dbg_nix_qsize_fops);
1092         if (!pfile)
1093                 goto create_failed;
1094
1095         return;
1096 create_failed:
1097         dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
1098         debugfs_remove_recursive(rvu->rvu_dbg.nix);
1099 }
1100
1101 static void rvu_dbg_npa_init(struct rvu *rvu)
1102 {
1103         const struct device *dev = &rvu->pdev->dev;
1104         struct dentry *pfile;
1105
1106         rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1107         if (!rvu->rvu_dbg.npa)
1108                 return;
1109
1110         pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1111                                     &rvu_dbg_npa_qsize_fops);
1112         if (!pfile)
1113                 goto create_failed;
1114
1115         pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1116                                     &rvu_dbg_npa_aura_ctx_fops);
1117         if (!pfile)
1118                 goto create_failed;
1119
1120         pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1121                                     &rvu_dbg_npa_pool_ctx_fops);
1122         if (!pfile)
1123                 goto create_failed;
1124
1125         return;
1126
1127 create_failed:
1128         dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
1129         debugfs_remove_recursive(rvu->rvu_dbg.npa);
1130 }
1131
1132 void rvu_dbg_init(struct rvu *rvu)
1133 {
1134         struct device *dev = &rvu->pdev->dev;
1135         struct dentry *pfile;
1136
1137         rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
1138         if (!rvu->rvu_dbg.root) {
1139                 dev_err(rvu->dev, "%s failed\n", __func__);
1140                 return;
1141         }
1142         pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
1143                                     &rvu_dbg_rsrc_status_fops);
1144         if (!pfile)
1145                 goto create_failed;
1146
1147         rvu_dbg_npa_init(rvu);
1148         rvu_dbg_nix_init(rvu);
1149
1150         return;
1151
1152 create_failed:
1153         dev_err(dev, "Failed to create debugfs dir\n");
1154         debugfs_remove_recursive(rvu->rvu_dbg.root);
1155 }
1156
/* Tear down the entire debugfs tree created by rvu_dbg_init(), including
 * the NPA and NIX subtrees under it.
 */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
1161
1162 #endif /* CONFIG_DEBUG_FS */