/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"
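
/*
 * Attribute validation policy: nlmsg_parse() checks every attribute in an
 * incoming request against the type and length limits declared here before
 * any handler below runs. NLA_NUL_STRING entries bound the string length,
 * and the sockaddr blobs (SRC_ADDR/DST_ADDR) carry only a length constraint.
 */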
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME]	= { .type = NLA_NUL_STRING,
					    .len = IB_DEVICE_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_PORT_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION]	= { .type = NLA_NUL_STRING,
					    .len = IB_FW_VERSION_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_NODE_GUID]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LMC]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
					    .len = 16 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_QP]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LQPN]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_TYPE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME]	= { .type = NLA_NUL_STRING,
					    .len = TASK_COMM_LEN },
	[RDMA_NLDEV_ATTR_RES_CM_ID]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PS]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]	= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR]	= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_CQ]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_MR]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RKEY]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LKEY]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_IOVA]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRLEN]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_PD]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME]	= { .type = NLA_NUL_STRING,
					    .len = IFNAMSIZ },
};
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
		return -EMSGSIZE;

	return 0;
}
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;

	return 0;
}
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      (u64)attr.port_cap_flags, RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (rdma_protocol_ib(device, port) &&
	    nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
			      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (rdma_protocol_ib(device, port)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	if (device->get_netdev)
		netdev = device->get_netdev(device, port);

	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);
	return ret;
}
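
/*
 * Nested attribute pattern used by all of the fill helpers below: open a
 * container with nla_nest_start(), emit the member attributes into it, and
 * close it with nla_nest_end(). On any put failure the whole container is
 * rolled back with nla_nest_cancel(), so a truncated entry is never left
 * in the message.
 */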
static int fill_res_info_entry(struct sk_buff *msg,
			       const char *name, u64 curr)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}
static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
	};

	struct rdma_restrack_root *res = &device->res;
	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}
static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	/*
	 * For user resources, userspace should read /proc/PID/comm to get
	 * the name of the task.
	 */
	if (rdma_is_kernel_res(res)) {
		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
				   res->kern_name))
			return -EMSGSIZE;
	} else {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
				task_pid_vnr(res->task)))
			return -EMSGSIZE;
	}

	return 0;
}
static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_qp_init_attr qp_init_attr;
	struct nlattr *entry_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
	if (!entry_attr)
		goto out;

	/* In create_qp() port is not set yet */
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
static int fill_res_cm_id_entry(struct sk_buff *msg,
				struct netlink_callback *cb,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct rdma_cm_id *cm_id = &id_priv->id;
	struct nlattr *entry_attr;

	if (port && port != cm_id->port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY);
	if (!entry_attr)
		goto out;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY);
	if (!entry_attr)
		goto out;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
	if (!entry_attr)
		goto out;

	/* Keys and IOVA are security sensitive; expose them to admins only */
	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			goto err;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_IOVA,
				      mr->iova, RDMA_NLDEV_ATTR_PAD))
			goto err;
	}

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_pd *pd = container_of(res, struct ib_pd, res);
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY);
	if (!entry_attr)
		goto out;

	/* Keys are security sensitive; expose them to admins only */
	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
				pd->local_dma_lkey))
			goto err;
		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
				pd->unsafe_global_rkey))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);

	put_device(&device->dev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return err;
}
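
/*
 * Dump handlers are re-entered by the netlink core until they stop adding
 * messages; cb->args[0] records how many objects were already emitted so
 * each pass can resume where the previous one stopped. Every message that
 * is part of a dump is flagged NLM_F_MULTI.
 */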
static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:	cb->args[0] = idx;
	return skb->len;
}
static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take a lock here, because
	 * we rely on ib_core's lists_rwsem.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}
static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		err = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_PORT_GET),
			0, 0);

	err = fill_port_info(msg, device, port, sock_net(skb->sk));
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	put_device(&device->dev);

	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return err;
}
static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	u32 p;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(ifindex);
	if (!device)
		return -EINVAL;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		/*
		 * The dumpit function returns all information from a
		 * specific index. This index is taken from the netlink
		 * request sent by the user and is available in cb->args[0].
		 *
		 * Usually the user doesn't fill this field, which causes
		 * everything to be returned.
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	put_device(&device->dev);
	cb->args[0] = idx;
	return skb->len;
}
static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, 0);

	ret = fill_res_info(msg, device);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	put_device(&device->dev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return ret;
}
static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	if (fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}
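
/*
 * Per-resource-type dispatch: res_get_common_dumpit() looks up the fill
 * callback, the nesting attribute, and the netlink command for the
 * requested restrack type in fill_entries[], so supporting a new tracked
 * resource only requires a fill function and a table entry here.
 */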
struct nldev_fill_res_entry {
	int (*fill_res_func)(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, u32 port);
	enum rdma_nldev_attr nldev_attr;
	enum rdma_nldev_command nldev_cmd;
};

static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
	},
};
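
/*
 * Common dump loop for all tracked resource types. The restrack hash is
 * walked under the read side of device->res.rwsem; the lock is dropped
 * around each fill callback because helpers such as ib_query_qp() may
 * sleep, and the reference taken with rdma_restrack_get() keeps the entry
 * alive across the unlocked window.
 */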
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	/*
	 * Right now, we are expecting the device index to get res information,
	 * but it is possible to extend this code to return all devices in
	 * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX.
	 * If it doesn't exist, we will iterate over all devices.
	 *
	 * But it is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, return all resources of this type
	 * from that device.
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	down_read(&device->res.rwsem);
	hash_for_each_possible(device->res.hash, res, node, res_type) {
		if (idx < start)
			goto next;

		if ((rdma_is_kernel_res(res) &&
		     task_active_pid_ns(current) != &init_pid_ns) ||
		    (!rdma_is_kernel_res(res) && task_active_pid_ns(current) !=
		     task_active_pid_ns(res->task)))
			/*
			 * 1. Kernel resources should be visible in the init
			 *    namespace only.
			 * 2. Present only resources visible in the current
			 *    namespace.
			 */
			goto next;

		if (!rdma_restrack_get(res))
			/*
			 * Resource is under release now, but we are not
			 * releasing the lock, so it will be freed in our
			 * next pass, once we get the ->next pointer.
			 */
			goto next;

		filled = true;

		up_read(&device->res.rwsem);
		ret = fe->fill_res_func(skb, cb, res, port);
		down_read(&device->res.rwsem);
		/*
		 * Put the resource back; it is not actually released until
		 * &device->res.rwsem is taken for write.
		 */
		rdma_restrack_put(res);

		if (ret == -EMSGSIZE)
			/*
			 * There is a chance to optimize here by using
			 * list_prepare_entry() and
			 * list_for_each_entry_continue() afterwards.
			 */
			break;
		if (ret)
			goto res_err;
next:		idx++;
	}
	up_read(&device->res.rwsem);

	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill: cancel the message and
	 * return 0 to mark the end of the dump.
	 */
	if (!filled)
		goto err;

	put_device(&device->dev);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);
	up_read(&device->res.rwsem);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	put_device(&device->dev);
	return ret;
}
static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP);
}

static int nldev_res_get_cm_id_dumpit(struct sk_buff *skb,
				      struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CM_ID);
}

static int nldev_res_get_cq_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CQ);
}

static int nldev_res_get_mr_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR);
}

static int nldev_res_get_pd_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_PD);
}
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.dump = nldev_res_get_qp_dumpit,
		/*
		 * .doit is not implemented yet for two reasons:
		 * 1. It is not needed yet.
		 * 2. There is a need to provide an identifier; while it is
		 *    easy for QPs (device index + port index + LQPN), it is
		 *    not the case for the rest of the resources (PD and CQ).
		 *    Because it is better to provide a similar interface for
		 *    all resources, let's wait until the other resources are
		 *    implemented too.
		 */
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.dump = nldev_res_get_pd_dumpit,
	},
};
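
/*
 * Userspace reaches these handlers over a NETLINK_RDMA socket. A minimal,
 * illustrative sketch of a raw-socket client (real tools such as
 * iproute2's "rdma" utility use a netlink library instead):
 *
 *	struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
 *	struct nlmsghdr req = {
 *		.nlmsg_len = NLMSG_HDRLEN,
 *		.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
 *					       RDMA_NLDEV_CMD_GET),
 *		.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *	};
 *	sendto(fd, &req, req.nlmsg_len, 0,
 *	       (struct sockaddr *)&addr, sizeof(addr));
 *	(then receive NLM_F_MULTI replies until NLMSG_DONE)
 */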
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);