/*
 * Copyright (c) 2018 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
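
/*
 * restrack "fill" handlers for iw_cxgb4.  Each rdma_nl_put_driver_*()
 * call below appends one driver-specific key/value attribute to the
 * RDMA_NLDEV_ATTR_DRIVER netlink table for a tracked resource, which
 * userspace can then display (e.g. "rdma res show qp -dd" with a
 * sufficiently recent iproute2).
 */
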
static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
{
	if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid) ||
	    rdma_nl_put_driver_u32(msg, "flushed", wq->flushed) ||
	    rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize) ||
	    rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx) ||
	    rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx) ||
	    rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx) ||
	    rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx) ||
	    rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use) ||
	    rdma_nl_put_driver_u32(msg, "size", wq->sq.size) ||
	    rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
		return -EMSGSIZE;
	return 0;
}

static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
{
	if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid) ||
	    rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize) ||
	    rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx) ||
	    rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx) ||
	    rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx) ||
	    rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn) ||
	    rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr) ||
	    rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size) ||
	    rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use) ||
	    rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
		return -EMSGSIZE;
	return 0;
}
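
/*
 * The sw_sq/sw_rq arrays shadow the hardware rings: each entry holds
 * the per-WR bookkeeping for a posted but not yet completed work
 * request.  The helpers below dump one such software queue entry.
 */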
static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx,
		      struct t4_swsqe *sqe)
{
	if (rdma_nl_put_driver_u32(msg, "idx", idx) ||
	    rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode) ||
	    rdma_nl_put_driver_u64_hex(msg, "wr_id", sqe->wr_id) ||
	    rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
		return -EMSGSIZE;
	if (sqe->complete &&
	    rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
		return -EMSGSIZE;
	if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled) ||
	    rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
		return -EMSGSIZE;
	return 0;
}

/* Dump the first and last pending sqes. */
static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq,
		       u16 first_idx, struct t4_swsqe *first_sqe,
		       u16 last_idx, struct t4_swsqe *last_sqe)
{
	if (first_sqe && fill_swsqe(msg, sq, first_idx, first_sqe))
		return -EMSGSIZE;
	if (last_sqe && fill_swsqe(msg, sq, last_idx, last_sqe))
		return -EMSGSIZE;
	return 0;
}

static int fill_swrqe(struct sk_buff *msg, struct t4_rq *rq, u16 idx,
		      struct t4_swrqe *rqe)
{
	if (rdma_nl_put_driver_u32(msg, "idx", idx) ||
	    rdma_nl_put_driver_u64_hex(msg, "wr_id", rqe->wr_id))
		return -EMSGSIZE;
	return 0;
}

/* Dump the first and last pending rqes. */
static int fill_swrqes(struct sk_buff *msg, struct t4_rq *rq,
		       u16 first_idx, struct t4_swrqe *first_rqe,
		       u16 last_idx, struct t4_swrqe *last_rqe)
{
	if (first_rqe && fill_swrqe(msg, rq, first_idx, first_rqe))
		return -EMSGSIZE;
	if (last_rqe && fill_swrqe(msg, rq, last_idx, last_rqe))
		return -EMSGSIZE;
	return 0;
}

static int fill_res_qp_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	struct ib_qp *ibqp = container_of(res, struct ib_qp, res);
	struct t4_swsqe *fsp = NULL, *lsp = NULL;
	struct t4_swrqe *frp = NULL, *lrp = NULL;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	struct t4_swsqe first_sqe, last_sqe;
	struct t4_swrqe first_rqe, last_rqe;
	u16 first_sq_idx, last_sq_idx;
	u16 first_rq_idx, last_rq_idx;
	struct nlattr *table_attr;
	struct t4_wq wq;

	/* User qp state is not available, so don't dump user qps */
	if (qhp->ucontext)
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	/* Get a consistent snapshot of the qp's work queues */
	spin_lock_irq(&qhp->lock);
	wq = qhp->wq;
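
	/*
	 * Ring-index convention used below: cidx == pidx means the ring
	 * is empty, and pidx is the next slot to be produced, so the
	 * newest pending entry sits at pidx - 1, wrapping to size - 1
	 * when pidx is 0.
	 */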
	/* If there are any pending sqes, copy the first and last */
	if (wq.sq.cidx != wq.sq.pidx) {
		first_sq_idx = wq.sq.cidx;
		first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
		fsp = &first_sqe;
		last_sq_idx = wq.sq.pidx;
		if (last_sq_idx-- == 0)
			last_sq_idx = wq.sq.size - 1;
		if (last_sq_idx != first_sq_idx) {
			last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
			lsp = &last_sqe;
		}
	}

	/* If there are any pending rqes, copy the first and last */
	if (wq.rq.cidx != wq.rq.pidx) {
		first_rq_idx = wq.rq.cidx;
		first_rqe = qhp->wq.rq.sw_rq[first_rq_idx];
		frp = &first_rqe;
		last_rq_idx = wq.rq.pidx;
		if (last_rq_idx-- == 0)
			last_rq_idx = wq.rq.size - 1;
		if (last_rq_idx != first_rq_idx) {
			last_rqe = qhp->wq.rq.sw_rq[last_rq_idx];
			lrp = &last_rqe;
		}
	}
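
	/*
	 * The on-stack copies hold everything needed from here on, so
	 * the lock can be dropped before composing the netlink
	 * attributes.
	 */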
	spin_unlock_irq(&qhp->lock);

	if (fill_sq(msg, &wq))
		goto err_cancel_table;

	if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
		goto err_cancel_table;

	if (fill_rq(msg, &wq))
		goto err_cancel_table;

	if (fill_swrqes(msg, &wq.rq, first_rq_idx, frp, last_rq_idx, lrp))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}

c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP]	= fill_res_qp_entry,
};
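
/*
 * A minimal sketch (names are illustrative; the real dispatch lives in
 * the provider's restrack hook, not in this file) of how this table is
 * typically consumed:
 *
 *	static int fill_res_entry(struct sk_buff *msg,
 *				  struct rdma_restrack_entry *res)
 *	{
 *		return (res->type < ARRAY_SIZE(c4iw_restrack_funcs) &&
 *			c4iw_restrack_funcs[res->type]) ?
 *			c4iw_restrack_funcs[res->type](msg, res) : 0;
 *	}
 */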