/*
 * Copyright (c) 2017 Mellanox Technologies Inc. All rights reserved.
 * Copyright (c) 2010 Voltaire Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__

#include <linux/export.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
/* Serializes registration/unregistration and message dispatch. */
static DEFINE_MUTEX(rdma_nl_mutex);
/* The NETLINK_RDMA kernel socket, created in rdma_nl_init(). */
static struct sock *nls;
/* Per-client callback tables, indexed by RDMA_NL_GET_CLIENT(type). */
static struct {
	const struct rdma_nl_cbs *cb_table;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
51 int rdma_nl_chk_listeners(unsigned int group)
53 return (netlink_has_listeners(nls, group)) ? 0 : -1;
55 EXPORT_SYMBOL(rdma_nl_chk_listeners);
57 static bool is_nl_msg_valid(unsigned int type, unsigned int op)
59 static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS - 1] = {
60 RDMA_NL_RDMA_CM_NUM_OPS,
67 * This BUILD_BUG_ON is intended to catch addition of new
68 * RDMA netlink protocol without updating the array above.
70 BUILD_BUG_ON(RDMA_NL_NUM_CLIENTS != 6);
72 if (type > RDMA_NL_NUM_CLIENTS - 1)
75 return (op < max_num_ops[type - 1]) ? true : false;
78 static bool is_nl_valid(unsigned int type, unsigned int op)
80 const struct rdma_nl_cbs *cb_table;
82 if (!is_nl_msg_valid(type, op))
85 cb_table = rdma_nl_types[type].cb_table;
86 if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
91 void rdma_nl_register(unsigned int index,
92 const struct rdma_nl_cbs cb_table[])
94 mutex_lock(&rdma_nl_mutex);
95 if (!is_nl_msg_valid(index, 0)) {
97 * All clients are not interesting in success/failure of
98 * this call. They want to see the print to error log and
99 * continue their initialization. Print warning for them,
100 * because it is programmer's error to be here.
102 mutex_unlock(&rdma_nl_mutex);
104 "The not-valid %u index was supplied to RDMA netlink\n",
109 if (rdma_nl_types[index].cb_table) {
110 mutex_unlock(&rdma_nl_mutex);
112 "The %u index is already registered in RDMA netlink\n",
117 rdma_nl_types[index].cb_table = cb_table;
118 mutex_unlock(&rdma_nl_mutex);
120 EXPORT_SYMBOL(rdma_nl_register);
122 void rdma_nl_unregister(unsigned int index)
124 mutex_lock(&rdma_nl_mutex);
125 rdma_nl_types[index].cb_table = NULL;
126 mutex_unlock(&rdma_nl_mutex);
128 EXPORT_SYMBOL(rdma_nl_unregister);
130 void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
131 int len, int client, int op, int flags)
133 *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, flags);
136 return nlmsg_data(*nlh);
138 EXPORT_SYMBOL(ibnl_put_msg);
140 int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
141 int len, void *data, int type)
143 if (nla_put(skb, type, len, data)) {
144 nlmsg_cancel(skb, nlh);
149 EXPORT_SYMBOL(ibnl_put_attr);
151 static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
152 struct netlink_ext_ack *extack)
154 int type = nlh->nlmsg_type;
155 unsigned int index = RDMA_NL_GET_CLIENT(type);
156 unsigned int op = RDMA_NL_GET_OP(type);
157 const struct rdma_nl_cbs *cb_table;
159 if (!is_nl_valid(index, op))
162 cb_table = rdma_nl_types[index].cb_table;
164 if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
165 !netlink_capable(skb, CAP_NET_ADMIN))
168 /* FIXME: Convert IWCM to properly handle doit callbacks */
169 if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
170 index == RDMA_NL_IWCM) {
171 struct netlink_dump_control c = {
172 .dump = cb_table[op].dump,
174 return netlink_dump_start(nls, skb, nlh, &c);
177 if (cb_table[op].doit)
178 return cb_table[op].doit(skb, nlh, extack);
184 * This function is similar to netlink_rcv_skb with one exception:
185 * It calls to the callback for the netlink messages without NLM_F_REQUEST
186 * flag. These messages are intended for RDMA_NL_LS consumer, so it is allowed
187 * for that consumer only.
189 static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
191 struct netlink_ext_ack *))
193 struct netlink_ext_ack extack = {};
194 struct nlmsghdr *nlh;
197 while (skb->len >= nlmsg_total_size(0)) {
200 nlh = nlmsg_hdr(skb);
203 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
207 * Generally speaking, the only requests are handled
208 * by the kernel, but RDMA_NL_LS is different, because it
209 * runs backward netlink scheme. Kernel initiates messages
210 * and waits for reply with data to keep pathrecord cache
213 if (!(nlh->nlmsg_flags & NLM_F_REQUEST) &&
214 (RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS))
217 /* Skip control messages */
218 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
221 err = cb(skb, nlh, &extack);
226 if (nlh->nlmsg_flags & NLM_F_ACK || err)
227 netlink_ack(skb, nlh, err, &extack);
230 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
231 if (msglen > skb->len)
233 skb_pull(skb, msglen);
239 static void rdma_nl_rcv(struct sk_buff *skb)
241 mutex_lock(&rdma_nl_mutex);
242 rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
243 mutex_unlock(&rdma_nl_mutex);
246 int rdma_nl_unicast(struct sk_buff *skb, u32 pid)
250 err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
251 return (err < 0) ? err : 0;
253 EXPORT_SYMBOL(rdma_nl_unicast);
255 int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid)
259 err = netlink_unicast(nls, skb, pid, 0);
260 return (err < 0) ? err : 0;
262 EXPORT_SYMBOL(rdma_nl_unicast_wait);
264 int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags)
266 return nlmsg_multicast(nls, skb, 0, group, flags);
268 EXPORT_SYMBOL(rdma_nl_multicast);
270 int __init rdma_nl_init(void)
272 struct netlink_kernel_cfg cfg = {
273 .input = rdma_nl_rcv,
276 nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
280 nls->sk_sndtimeo = 10 * HZ;
284 void rdma_nl_exit(void)
288 for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
289 rdma_nl_unregister(idx);
291 netlink_kernel_release(nls);