1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
4 * Copyright (C) 2019 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #ifdef CONFIG_DEBUG_FS
14 #include <linux/debugfs.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
18 #include "rvu_struct.h"
22 #define DEBUGFS_DIR_NAME "octeontx2"
24 #define rvu_dbg_NULL NULL
25 #define rvu_dbg_open_NULL NULL
/*
 * RVU_DEBUG_SEQ_FOPS(name, read_op, write_op):
 * Generates a single_open()-based open helper plus the file_operations
 * table for a seq_file-backed debugfs node.  rvu_dbg_NULL and
 * rvu_dbg_open_NULL above let callers pass NULL for an unused op.
 * NOTE(review): this view is elided (no .read member visible between
 * .open and .write) -- confirm ".read = seq_read" in the full file.
 */
27 #define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
28 static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
30 return single_open(file, rvu_dbg_##read_op, inode->i_private); \
32 static const struct file_operations rvu_dbg_##name##_fops = { \
33 .owner = THIS_MODULE, \
34 .open = rvu_dbg_open_##name, \
36 .write = rvu_dbg_##write_op, \
37 .llseek = seq_lseek, \
38 .release = single_release, \
/*
 * RVU_DEBUG_FOPS(name, read_op, write_op):
 * Generates a plain (non-seq_file) file_operations table using
 * simple_open(); read/write handlers are supplied directly.
 */
41 #define RVU_DEBUG_FOPS(name, read_op, write_op) \
42 static const struct file_operations rvu_dbg_##name##_fops = { \
43 .owner = THIS_MODULE, \
44 .open = simple_open, \
45 .read = rvu_dbg_##read_op, \
46 .write = rvu_dbg_##write_op \
49 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
51 /* Dumps current provisioning status of all RVU block LFs */
/*
 * Read handler for the "rsrc_alloc" debugfs file: renders a table of
 * which block LFs are attached to each PF/VF.  pcifunc = pf << 10 | vf,
 * where vf == 0 denotes the PF itself -- hence the inclusive
 * "vf <= total_vfs" inner loop.  Output is assembled into a kzalloc'd
 * buffer and then copied to user space in one shot (partial reads are
 * rejected per the comment below).
 */
52 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
54 size_t count, loff_t *ppos)
56 int index, off = 0, flag = 0, go_back = 0, off_prev;
57 struct rvu *rvu = filp->private_data;
58 int lf, pf, vf, pcifunc;
59 struct rvu_block block;
64 /* don't allow partial reads */
68 buf = kzalloc(buf_size, GFP_KERNEL);
71 off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
72 for (index = 0; index < BLK_COUNT; index++)
73 if (strlen(rvu->hw->block[index].name))
74 off += scnprintf(&buf[off], buf_size - 1 - off,
/* NOTE(review): "(index - 1) * 2" gives a negative field width for
 * index == 0 (left-justify in %*s) -- presumably intentional column
 * alignment, but confirm the header lines up in practice.
 */
75 "%*s\t", (index - 1) * 2,
76 rvu->hw->block[index].name);
77 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
78 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
79 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
80 pcifunc = pf << 10 | vf;
85 go_back = scnprintf(&buf[off],
90 go_back = scnprintf(&buf[off],
96 for (index = 0; index < BLKTYPE_MAX; index++) {
97 block = rvu->hw->block[index];
98 if (!strlen(block.name))
101 for (lf = 0; lf < block.lf.max; lf++) {
102 if (block.fn_map[lf] != pcifunc)
105 off += scnprintf(&buf[off], buf_size - 1
108 if (flag && off_prev != off)
112 off += scnprintf(&buf[off], buf_size - 1 - off,
120 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
124 bytes_not_copied = copy_to_user(buffer, buf, off);
127 if (bytes_not_copied)
134 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
/*
 * Validate that @lf is a provisioned LF of block type @blktype and, on
 * success, report the owning PF/VF through @pcifunc (taken from the
 * block's fn_map).  Emits dev_warn() diagnostics for a bad blktype, an
 * out-of-range LF, or an LF not attached to any RVU PF/VF.  Returns
 * true only when the LF is valid and attached.
 */
136 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
139 struct rvu_block *block;
140 struct rvu_hwinfo *hw;
143 blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
145 dev_warn(rvu->dev, "Invalid blktype\n");
150 block = &hw->block[blkaddr];
152 if (lf < 0 || lf >= block->lf.max) {
153 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
158 *pcifunc = block->fn_map[lf];
161 "This LF is not attached to any RVU PFFUNC\n");
/*
 * Dump a PF/VF's NPA aura and pool queue sizes plus their
 * enable/disable bitmaps to the seq_file.  A PAGE_SIZE scratch buffer
 * is used for bitmap_print_to_pagebuf(); the matching kfree(buf) is in
 * elided lines -- confirm it is reached on all paths in the full file.
 */
167 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
171 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
175 if (!pfvf->aura_ctx) {
176 seq_puts(m, "Aura context is not initialized\n");
178 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
179 pfvf->aura_ctx->qsize);
180 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
181 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
184 if (!pfvf->pool_ctx) {
185 seq_puts(m, "Pool context is not initialized\n");
187 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
188 pfvf->pool_ctx->qsize);
189 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
190 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
195 /* The 'qsize' entry dumps current Aura/Pool context Qsize
196 * and each context's current enable/disable status in a bitmap.
/*
 * Shared seq_file show routine for the NPA/NIX "qsize" nodes: reads
 * back the LF most recently selected through the matching qsize write
 * handler, validates it, and dispatches to the per-block printer
 * (print_npa_qsize() or print_nix_qsize()).
 */
198 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
201 void (*print_qsize)(struct seq_file *filp,
202 struct rvu_pfvf *pfvf) = NULL;
203 struct rvu_pfvf *pfvf;
211 qsize_id = rvu->rvu_dbg.npa_qsize_id;
212 print_qsize = print_npa_qsize;
216 qsize_id = rvu->rvu_dbg.nix_qsize_id;
217 print_qsize = print_nix_qsize;
224 if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
227 pfvf = rvu_get_pfvf(rvu, pcifunc);
228 print_qsize(filp, pfvf);
/*
 * Shared write handler for the NPA/NIX "qsize" nodes: parses "<lf>"
 * (or "help") from user input and records the LF for the next read.
 */
233 static ssize_t rvu_dbg_qsize_write(struct file *filp,
234 const char __user *buffer, size_t count,
235 loff_t *ppos, int blktype)
237 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
238 struct seq_file *seqfile = filp->private_data;
239 char *cmd_buf, *cmd_buf_tmp, *subtoken;
240 struct rvu *rvu = seqfile->private;
/* NOTE(review): memdup_user(buffer, count) allocates exactly @count
 * bytes, so "cmd_buf[count] = '\0'" below writes one byte past the end
 * of the allocation (off-by-one heap overflow).  Use
 * memdup_user_nul(buffer, count), which allocates count + 1 bytes and
 * NUL-terminates -- compare parse_cmd_buffer_ctx(), whose caller
 * kzallocs count + 1 for the same reason.
 */
244 cmd_buf = memdup_user(buffer, count);
248 cmd_buf[count] = '\0';
250 cmd_buf_tmp = strchr(cmd_buf, '\n');
253 count = cmd_buf_tmp - cmd_buf + 1;
256 cmd_buf_tmp = cmd_buf;
257 subtoken = strsep(&cmd_buf, " ");
258 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
/* NOTE(review): strncmp() is evaluated before the "ret < 0" check, so
 * a NULL @subtoken (ret == -EINVAL above) would be dereferenced here.
 * Swap the operands: "ret < 0 || !strncmp(...)".
 */
262 if (!strncmp(subtoken, "help", 4) || ret < 0) {
263 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
264 goto qsize_write_done;
267 if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
269 goto qsize_write_done;
271 if (blktype == BLKTYPE_NPA)
272 rvu->rvu_dbg.npa_qsize_id = lf;
274 rvu->rvu_dbg.nix_qsize_id = lf;
278 return ret ? ret : count;
/* NPA "qsize" debugfs plumbing: thin wrappers binding the shared qsize
 * read/write helpers to BLKTYPE_NPA (elided argument -- confirm).
 */
281 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
282 const char __user *buffer,
283 size_t count, loff_t *ppos)
285 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
289 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
291 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
294 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
296 /* Dumps given NPA Aura's context */
/*
 * Pretty-print one NPA aura context (from an NPA AQ read response) to
 * the seq_file, field by field, grouped by hardware word W0..W6.
 */
297 static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
299 struct npa_aura_s *aura = &rsp->aura;
301 seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
303 seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
304 aura->ena, aura->pool_caching);
305 seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
306 aura->pool_way_mask, aura->avg_con);
307 seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
308 aura->pool_drop_ena, aura->aura_drop_ena);
309 seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
310 aura->bp_ena, aura->aura_drop);
311 seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
312 aura->shift, aura->avg_level);
314 seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
315 (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
317 seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
318 (u64)aura->limit, aura->bp, aura->fc_ena);
319 seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
320 aura->fc_up_crossing, aura->fc_stype);
321 seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
323 seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
325 seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
326 aura->pool_drop, aura->update_time);
327 seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
328 aura->err_int, aura->err_int_ena);
329 seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
330 aura->thresh_int, aura->thresh_int_ena);
331 seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
332 aura->thresh_up, aura->thresh_qint_idx);
333 seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
335 seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
338 /* Dumps given NPA Pool's context */
/*
 * Pretty-print one NPA pool context (from an NPA AQ read response) to
 * the seq_file, field by field, grouped by hardware word W0..W8.
 */
339 static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
341 struct npa_pool_s *pool = &rsp->pool;
343 seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
345 seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
346 pool->ena, pool->nat_align);
347 seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
348 pool->stack_caching, pool->stack_way_mask);
349 seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
350 pool->buf_offset, pool->buf_size);
352 seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
353 pool->stack_max_pages, pool->stack_pages);
355 seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
357 seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
358 pool->stack_offset, pool->shift, pool->avg_level);
359 seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
360 pool->avg_con, pool->fc_ena, pool->fc_stype);
361 seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
362 pool->fc_hyst_bits, pool->fc_up_crossing);
363 seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
365 seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
367 seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
369 seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
371 seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
372 pool->err_int, pool->err_int_ena);
373 seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
374 seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
375 pool->thresh_int_ena, pool->thresh_up);
376 seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
377 pool->thresh_qint_idx, pool->err_qint_idx);
380 /* Reads aura/pool's ctx from admin queue */
/*
 * Show handler for the NPA aura/pool context debugfs nodes: replays
 * the "<npalf> [id|all]" selection recorded by write_npa_ctx(),
 * validates the LF and the id range against the context qsize, then
 * issues NPA_AQ_INSTOP_READ per id and prints each context through
 * print_npa_aura_ctx()/print_npa_pool_ctx().  With "all" the loop
 * walks every id from the recorded one up to max_id.
 */
381 static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
383 void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
384 struct npa_aq_enq_req aq_req;
385 struct npa_aq_enq_rsp rsp;
386 struct rvu_pfvf *pfvf;
387 int aura, rc, max_id;
395 case NPA_AQ_CTYPE_AURA:
396 npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
397 id = rvu->rvu_dbg.npa_aura_ctx.id;
398 all = rvu->rvu_dbg.npa_aura_ctx.all;
401 case NPA_AQ_CTYPE_POOL:
402 npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
403 id = rvu->rvu_dbg.npa_pool_ctx.id;
404 all = rvu->rvu_dbg.npa_pool_ctx.all;
410 if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
413 pfvf = rvu_get_pfvf(rvu, pcifunc);
414 if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
415 seq_puts(m, "Aura context is not initialized\n");
417 } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
418 seq_puts(m, "Pool context is not initialized\n");
422 memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
423 aq_req.hdr.pcifunc = pcifunc;
424 aq_req.ctype = ctype;
425 aq_req.op = NPA_AQ_INSTOP_READ;
426 if (ctype == NPA_AQ_CTYPE_AURA) {
427 max_id = pfvf->aura_ctx->qsize;
428 print_npa_ctx = print_npa_aura_ctx;
430 max_id = pfvf->pool_ctx->qsize;
431 print_npa_ctx = print_npa_pool_ctx;
434 if (id < 0 || id >= max_id) {
435 seq_printf(m, "Invalid %s, valid range is 0-%d\n",
436 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
446 for (aura = id; aura < max_id; aura++) {
447 aq_req.aura_id = aura;
448 seq_printf(m, "======%s : %d=======\n",
449 (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
451 rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
453 seq_puts(m, "Failed to read context\n");
456 print_npa_ctx(m, &rsp);
/*
 * Validate and record an NPA aura/pool context selection (@npalf, @id,
 * @all) in rvu_dbg state so a later read of the matching debugfs node
 * can display it.  Rejects an invalid LF, an uninitialized context, or
 * an out-of-range id (checked against the context qsize).
 */
461 static int write_npa_ctx(struct rvu *rvu, bool all,
462 int npalf, int id, int ctype)
464 struct rvu_pfvf *pfvf;
468 if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
471 pfvf = rvu_get_pfvf(rvu, pcifunc);
473 if (ctype == NPA_AQ_CTYPE_AURA) {
474 if (!pfvf->aura_ctx) {
475 dev_warn(rvu->dev, "Aura context is not initialized\n");
478 max_id = pfvf->aura_ctx->qsize;
479 } else if (ctype == NPA_AQ_CTYPE_POOL) {
480 if (!pfvf->pool_ctx) {
481 dev_warn(rvu->dev, "Pool context is not initialized\n");
484 max_id = pfvf->pool_ctx->qsize;
487 if (id < 0 || id >= max_id) {
488 dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
489 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
495 case NPA_AQ_CTYPE_AURA:
496 rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
497 rvu->rvu_dbg.npa_aura_ctx.id = id;
498 rvu->rvu_dbg.npa_aura_ctx.all = all;
501 case NPA_AQ_CTYPE_POOL:
502 rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
503 rvu->rvu_dbg.npa_pool_ctx.id = id;
504 rvu->rvu_dbg.npa_pool_ctx.all = all;
/*
 * Parse "<lf> [<id>|all]" from user space into *npalf, *id and the
 * elided *all flag.  The caller must supply a cmd_buf of at least
 * *count + 1 bytes: "cmd_buf[*count] = '\0'" below depends on that
 * extra byte (both callers kzalloc count + 1).  *count is trimmed to
 * the first newline when one is present.
 */
512 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
513 const char __user *buffer, int *npalf,
516 int bytes_not_copied;
521 bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
522 if (bytes_not_copied)
525 cmd_buf[*count] = '\0';
526 cmd_buf_tmp = strchr(cmd_buf, '\n');
530 *count = cmd_buf_tmp - cmd_buf + 1;
533 subtoken = strsep(&cmd_buf, " ");
534 ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
537 subtoken = strsep(&cmd_buf, " ");
/* "all" selects every context id; otherwise the token is a numeric id.
 * The branch setting the all-flag is in elided lines -- confirm.
 */
538 if (subtoken && strcmp(subtoken, "all") == 0) {
541 ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
/*
 * Write handler shared by the aura_ctx/pool_ctx nodes: allocates
 * count + 1 bytes (so parse_cmd_buffer_ctx() can NUL-terminate
 * safely), parses the "<npalf> [id|all]" selection and stores it via
 * write_npa_ctx().  Prints a usage hint on parse failure.
 */
550 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
551 const char __user *buffer,
552 size_t count, loff_t *ppos, int ctype)
554 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
556 struct seq_file *seqfp = filp->private_data;
557 struct rvu *rvu = seqfp->private;
558 int npalf, id = 0, ret;
561 if ((*ppos != 0) || !count)
564 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
567 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
571 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
572 ctype_string, ctype_string);
575 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
579 return ret ? ret : count;
/* NPA "aura_ctx" debugfs plumbing: wrappers binding the shared ctx
 * read/write helpers to NPA_AQ_CTYPE_AURA.
 */
582 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
583 const char __user *buffer,
584 size_t count, loff_t *ppos)
586 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
590 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
592 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
595 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
/* NPA "pool_ctx" debugfs plumbing: wrappers binding the shared ctx
 * read/write helpers to NPA_AQ_CTYPE_POOL.
 */
597 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
598 const char __user *buffer,
599 size_t count, loff_t *ppos)
601 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
605 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
607 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
610 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
612 /* Dumps given nix_sq's context */
/*
 * Pretty-print one NIX SQ context (from a NIX AQ read response) to the
 * seq_file, field by field, grouped by hardware word W0..W15.
 */
613 static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
615 struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
617 seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
618 sq_ctx->sqe_way_mask, sq_ctx->cq);
619 seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
620 sq_ctx->sdp_mcast, sq_ctx->substream);
621 seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
622 sq_ctx->qint_idx, sq_ctx->ena);
624 seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
625 sq_ctx->sqb_count, sq_ctx->default_chan);
626 seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
627 sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
628 seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
629 sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
631 seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
632 sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
633 seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
634 sq_ctx->sq_int, sq_ctx->sqb_aura);
635 seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
637 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
638 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
639 seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
640 sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
641 seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
642 sq_ctx->smenq_offset, sq_ctx->tail_offset);
643 seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
644 sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
645 seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
646 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
647 seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
648 sq_ctx->cq_limit, sq_ctx->max_sqe_size);
650 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
651 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
652 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
653 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
654 sq_ctx->smenq_next_sqb);
656 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
658 seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
659 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
660 seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
661 sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
662 seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
663 sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
664 seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
666 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
667 (u64)sq_ctx->scm_lso_rem);
668 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
669 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
670 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
671 (u64)sq_ctx->dropped_octs);
672 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
673 (u64)sq_ctx->dropped_pkts);
676 /* Dumps given nix_rq's context */
/*
 * Pretty-print one NIX RQ context (from a NIX AQ read response) to the
 * seq_file, field by field, grouped by hardware word W0..W10.
 */
677 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
679 struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
681 seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
682 rq_ctx->wqe_aura, rq_ctx->substream);
683 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
684 rq_ctx->cq, rq_ctx->ena_wqwd);
685 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
686 rq_ctx->ipsech_ena, rq_ctx->sso_ena);
687 seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
689 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
690 rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
691 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
692 rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
693 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
694 rq_ctx->pb_caching, rq_ctx->sso_tt);
695 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
696 rq_ctx->sso_grp, rq_ctx->lpb_aura);
697 seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
699 seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
700 rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
701 seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
702 rq_ctx->xqe_imm_size, rq_ctx->later_skip);
703 seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
704 rq_ctx->first_skip, rq_ctx->lpb_sizem1);
705 seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
706 rq_ctx->spb_ena, rq_ctx->wqe_skip);
707 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
709 seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
710 rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
711 seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
712 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
713 seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
714 rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
715 seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
716 rq_ctx->xqe_pass, rq_ctx->xqe_drop);
718 seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
719 rq_ctx->qint_idx, rq_ctx->rq_int_ena);
720 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
721 rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
722 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
723 rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
724 seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
726 seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
727 rq_ctx->flow_tagw, rq_ctx->bad_utag);
728 seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
729 rq_ctx->good_utag, rq_ctx->ltag);
731 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
732 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
733 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
734 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
735 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
738 /* Dumps given nix_cq's context */
/*
 * Pretty-print one NIX CQ context (from a NIX AQ read response) to the
 * seq_file, field by field, grouped by hardware word W0..W3.
 */
739 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
741 struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
743 seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
745 seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
746 seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
747 cq_ctx->avg_con, cq_ctx->cint_idx);
748 seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
749 cq_ctx->cq_err, cq_ctx->qint_idx);
750 seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
751 cq_ctx->bpid, cq_ctx->bp_ena);
753 seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
754 cq_ctx->update_time, cq_ctx->avg_level);
755 seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
756 cq_ctx->head, cq_ctx->tail);
758 seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
759 cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
760 seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
761 cq_ctx->qsize, cq_ctx->caching);
762 seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
763 cq_ctx->substream, cq_ctx->ena);
764 seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
765 cq_ctx->drop_ena, cq_ctx->drop);
766 seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
/*
 * Show handler for the NIX cq/sq/rq ctx debugfs nodes: replays the
 * "<nixlf> [id|all]" selection recorded by write_nix_queue_ctx(),
 * validates the LF and context state, then reads each context through
 * the NIX AQ mailbox (NIX_AQ_INSTOP_READ) and dumps it via the
 * matching print_nix_{sq,rq,cq}_ctx() printer.
 */
769 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
770 void *unused, int ctype)
772 void (*print_nix_ctx)(struct seq_file *filp,
773 struct nix_aq_enq_rsp *rsp) = NULL;
774 struct rvu *rvu = filp->private;
775 struct nix_aq_enq_req aq_req;
776 struct nix_aq_enq_rsp rsp;
777 char *ctype_string = NULL;
778 int qidx, rc, max_id = 0;
779 struct rvu_pfvf *pfvf;
784 case NIX_AQ_CTYPE_CQ:
785 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
786 id = rvu->rvu_dbg.nix_cq_ctx.id;
787 all = rvu->rvu_dbg.nix_cq_ctx.all;
790 case NIX_AQ_CTYPE_SQ:
791 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
792 id = rvu->rvu_dbg.nix_sq_ctx.id;
793 all = rvu->rvu_dbg.nix_sq_ctx.all;
796 case NIX_AQ_CTYPE_RQ:
797 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
798 id = rvu->rvu_dbg.nix_rq_ctx.id;
799 all = rvu->rvu_dbg.nix_rq_ctx.all;
806 if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
809 pfvf = rvu_get_pfvf(rvu, pcifunc);
810 if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
811 seq_puts(filp, "SQ context is not initialized\n");
813 } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
814 seq_puts(filp, "RQ context is not initialized\n");
816 } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
817 seq_puts(filp, "CQ context is not initialized\n");
821 if (ctype == NIX_AQ_CTYPE_SQ) {
822 max_id = pfvf->sq_ctx->qsize;
824 print_nix_ctx = print_nix_sq_ctx;
825 } else if (ctype == NIX_AQ_CTYPE_RQ) {
826 max_id = pfvf->rq_ctx->qsize;
828 print_nix_ctx = print_nix_rq_ctx;
829 } else if (ctype == NIX_AQ_CTYPE_CQ) {
830 max_id = pfvf->cq_ctx->qsize;
832 print_nix_ctx = print_nix_cq_ctx;
835 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
836 aq_req.hdr.pcifunc = pcifunc;
837 aq_req.ctype = ctype;
838 aq_req.op = NIX_AQ_INSTOP_READ;
843 for (qidx = id; qidx < max_id; qidx++) {
845 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
846 ctype_string, nixlf, aq_req.qidx);
847 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
849 seq_puts(filp, "Failed to read the context\n");
852 print_nix_ctx(filp, &rsp);
/*
 * Validate and record a NIX SQ/RQ/CQ context selection (@nixlf, @id,
 * @all) in rvu_dbg state so a later read of the matching debugfs node
 * can display it.  Rejects an invalid LF, an uninitialized context, or
 * an out-of-range id (checked against the context qsize).
 */
857 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
858 int id, int ctype, char *ctype_string)
860 struct rvu_pfvf *pfvf;
864 if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
867 pfvf = rvu_get_pfvf(rvu, pcifunc);
869 if (ctype == NIX_AQ_CTYPE_SQ) {
871 dev_warn(rvu->dev, "SQ context is not initialized\n");
874 max_id = pfvf->sq_ctx->qsize;
875 } else if (ctype == NIX_AQ_CTYPE_RQ) {
877 dev_warn(rvu->dev, "RQ context is not initialized\n");
880 max_id = pfvf->rq_ctx->qsize;
881 } else if (ctype == NIX_AQ_CTYPE_CQ) {
883 dev_warn(rvu->dev, "CQ context is not initialized\n");
886 max_id = pfvf->cq_ctx->qsize;
889 if (id < 0 || id >= max_id) {
890 dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
891 ctype_string, max_id - 1);
895 case NIX_AQ_CTYPE_CQ:
896 rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
897 rvu->rvu_dbg.nix_cq_ctx.id = id;
898 rvu->rvu_dbg.nix_cq_ctx.all = all;
901 case NIX_AQ_CTYPE_SQ:
902 rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
903 rvu->rvu_dbg.nix_sq_ctx.id = id;
904 rvu->rvu_dbg.nix_sq_ctx.all = all;
907 case NIX_AQ_CTYPE_RQ:
908 rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
909 rvu->rvu_dbg.nix_rq_ctx.id = id;
910 rvu->rvu_dbg.nix_rq_ctx.all = all;
/*
 * Write handler shared by the NIX sq/rq/cq ctx nodes: maps @ctype to
 * its name string, allocates count + 1 bytes (so
 * parse_cmd_buffer_ctx() can NUL-terminate safely), parses the
 * "<nixlf> [id|all]" selection and stores it via
 * write_nix_queue_ctx().  Prints a usage hint on parse failure.
 */
918 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
919 const char __user *buffer,
920 size_t count, loff_t *ppos,
923 struct seq_file *m = filp->private_data;
924 struct rvu *rvu = m->private;
925 char *cmd_buf, *ctype_string;
926 int nixlf, id = 0, ret;
929 if ((*ppos != 0) || !count)
933 case NIX_AQ_CTYPE_SQ:
936 case NIX_AQ_CTYPE_RQ:
939 case NIX_AQ_CTYPE_CQ:
946 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
951 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
955 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
956 ctype_string, ctype_string);
959 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
964 return ret ? ret : count;
/* NIX "sq_ctx" debugfs plumbing: wrappers binding the shared queue-ctx
 * read/write helpers to NIX_AQ_CTYPE_SQ.
 */
967 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
968 const char __user *buffer,
969 size_t count, loff_t *ppos)
971 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
975 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
977 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
980 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
/* NIX "rq_ctx" debugfs plumbing: wrappers binding the shared queue-ctx
 * read/write helpers to NIX_AQ_CTYPE_RQ.
 */
982 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
983 const char __user *buffer,
984 size_t count, loff_t *ppos)
986 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
990 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
992 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
995 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
/* NIX "cq_ctx" debugfs plumbing: wrappers binding the shared queue-ctx
 * read/write helpers to NIX_AQ_CTYPE_CQ.
 */
997 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
998 const char __user *buffer,
999 size_t count, loff_t *ppos)
1001 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1005 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
1007 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
1010 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
/*
 * Print one NIX queue type's context count and enable/disable bitmap.
 * Uses a PAGE_SIZE scratch buffer for bitmap_print_to_pagebuf(); the
 * matching kfree(buf) is in elided lines -- confirm in the full file.
 */
1012 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1013 unsigned long *bmap, char *qtype)
1017 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1021 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1022 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1023 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
/*
 * Dump a PF/VF's NIX CQ, RQ and SQ qsizes and ena/dis bitmaps via
 * print_nix_qctx_qsize(), reporting uninitialized contexts instead.
 */
1028 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1031 seq_puts(filp, "cq context is not initialized\n");
1033 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1037 seq_puts(filp, "rq context is not initialized\n");
1039 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1043 seq_puts(filp, "sq context is not initialized\n");
1045 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
/* NIX "qsize" debugfs plumbing: thin wrappers binding the shared qsize
 * read/write helpers to BLKTYPE_NIX.
 */
1049 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
1050 const char __user *buffer,
1051 size_t count, loff_t *ppos)
1053 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1057 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
1059 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
1062 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
/*
 * Create the "nix" debugfs directory and its sq_ctx/rq_ctx/cq_ctx and
 * qsize files; on any failure, log and tear the directory down with
 * debugfs_remove_recursive().
 * NOTE(review): modern debugfs_create_*() return ERR_PTR() on failure
 * rather than NULL, so these NULL checks may never trigger -- verify
 * against the target kernel version.
 */
1064 static void rvu_dbg_nix_init(struct rvu *rvu)
1066 const struct device *dev = &rvu->pdev->dev;
1067 struct dentry *pfile;
1069 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1070 if (!rvu->rvu_dbg.nix) {
1071 dev_err(rvu->dev, "create debugfs dir failed for nix\n");
1075 pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1076 &rvu_dbg_nix_sq_ctx_fops);
1080 pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1081 &rvu_dbg_nix_rq_ctx_fops);
1085 pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
1086 &rvu_dbg_nix_cq_ctx_fops);
1090 pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1091 &rvu_dbg_nix_qsize_fops);
1097 dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
1098 debugfs_remove_recursive(rvu->rvu_dbg.nix);
/*
 * Create the "npa" debugfs directory and its qsize/aura_ctx/pool_ctx
 * files; on any failure, log and tear the directory down with
 * debugfs_remove_recursive().  Same NULL-vs-ERR_PTR caveat as
 * rvu_dbg_nix_init() applies to the debugfs return checks.
 */
1101 static void rvu_dbg_npa_init(struct rvu *rvu)
1103 const struct device *dev = &rvu->pdev->dev;
1104 struct dentry *pfile;
1106 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1107 if (!rvu->rvu_dbg.npa)
1110 pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1111 &rvu_dbg_npa_qsize_fops);
1115 pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1116 &rvu_dbg_npa_aura_ctx_fops);
1120 pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1121 &rvu_dbg_npa_pool_ctx_fops);
1128 dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
1129 debugfs_remove_recursive(rvu->rvu_dbg.npa);
/*
 * Top-level debugfs init: create the "octeontx2" root directory, the
 * read-only "rsrc_alloc" file, then the NPA and NIX sub-directories.
 * Failures are logged and the whole tree is removed.
 */
1132 void rvu_dbg_init(struct rvu *rvu)
1134 struct device *dev = &rvu->pdev->dev;
1135 struct dentry *pfile;
1137 rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
1138 if (!rvu->rvu_dbg.root) {
1139 dev_err(rvu->dev, "%s failed\n", __func__);
1142 pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
1143 &rvu_dbg_rsrc_status_fops);
1147 rvu_dbg_npa_init(rvu);
1148 rvu_dbg_nix_init(rvu);
1153 dev_err(dev, "Failed to create debugfs dir\n");
1154 debugfs_remove_recursive(rvu->rvu_dbg.root);
/* Tear down the entire "octeontx2" debugfs tree on driver exit. */
1157 void rvu_dbg_exit(struct rvu *rvu)
1159 debugfs_remove_recursive(rvu->rvu_dbg.root);
1162 #endif /* CONFIG_DEBUG_FS */