/*
 * NETLINK	Generic Netlink Family
 *
 *		Authors:	Jamal Hadi Salim
 *				Thomas Graf <tgraf@suug.ch>
 *				Johannes Berg <johannes@sipsolutions.net>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/rwsem.h>
#include <net/sock.h>
#include <linux/genetlink.h>
#include <net/genetlink.h>
23 static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
24 static DECLARE_RWSEM(cb_lock);
26 atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
27 DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
31 mutex_lock(&genl_mutex);
33 EXPORT_SYMBOL(genl_lock);
35 void genl_unlock(void)
37 mutex_unlock(&genl_mutex);
39 EXPORT_SYMBOL(genl_unlock);
42 bool lockdep_genl_is_held(void)
44 return lockdep_is_held(&genl_mutex);
46 EXPORT_SYMBOL(lockdep_genl_is_held);
49 static void genl_lock_all(void)
55 static void genl_unlock_all(void)
61 #define GENL_FAM_TAB_SIZE 16
62 #define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
64 static struct list_head family_ht[GENL_FAM_TAB_SIZE];
66 * Bitmap of multicast groups that are currently in use.
68 * To avoid an allocation at boot of just one unsigned long,
69 * declare it global instead.
70 * Bit 0 is marked as already used since group 0 is invalid.
71 * Bit 1 is marked as already used since the drop-monitor code
72 * abuses the API and thinks it can statically use group 1.
73 * That group will typically conflict with other groups that
74 * any proper users use.
75 * Bit 16 is marked as used since it's used for generic netlink
76 * and the code no longer marks pre-reserved IDs as used.
77 * Bit 17 is marked as already used since the VFS quota code
78 * also abused this API and relied on family == group ID, we
79 * cater to that by giving it a static family and group ID.
80 * Bit 18 is marked as already used since the PMCRAID driver
81 * did the same thing as the VFS quota code (maybe copied?)
83 static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
84 BIT(GENL_ID_VFS_DQUOT) |
86 static unsigned long *mc_groups = &mc_group_start;
87 static unsigned long mc_groups_longs = 1;
/* Forward declaration: broadcast a controller notification (defined below). */
static int genl_ctrl_event(int event, struct genl_family *family,
			   const struct genl_multicast_group *grp,
			   int grp_id);
93 static inline unsigned int genl_family_hash(unsigned int id)
95 return id & GENL_FAM_TAB_MASK;
98 static inline struct list_head *genl_family_chain(unsigned int id)
100 return &family_ht[genl_family_hash(id)];
103 static struct genl_family *genl_family_find_byid(unsigned int id)
105 struct genl_family *f;
107 list_for_each_entry(f, genl_family_chain(id), family_list)
114 static struct genl_family *genl_family_find_byname(char *name)
116 struct genl_family *f;
119 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
120 list_for_each_entry(f, genl_family_chain(i), family_list)
121 if (strcmp(f->name, name) == 0)
127 static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
131 for (i = 0; i < family->n_ops; i++)
132 if (family->ops[i].cmd == cmd)
133 return &family->ops[i];
138 /* Of course we are going to have problems once we hit
139 * 2^16 alive types, but that can only happen by year 2K
141 static u16 genl_generate_id(void)
143 static u16 id_gen_idx = GENL_MIN_ID;
146 for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
147 if (id_gen_idx != GENL_ID_VFS_DQUOT &&
148 id_gen_idx != GENL_ID_PMCRAID &&
149 !genl_family_find_byid(id_gen_idx))
151 if (++id_gen_idx > GENL_MAX_ID)
152 id_gen_idx = GENL_MIN_ID;
158 static int genl_allocate_reserve_groups(int n_groups, int *first_id)
160 unsigned long *new_groups;
168 id = find_first_zero_bit(mc_groups,
172 id = find_next_zero_bit(mc_groups,
173 mc_groups_longs * BITS_PER_LONG,
178 i < min_t(int, id + n_groups,
179 mc_groups_longs * BITS_PER_LONG);
181 if (test_bit(i, mc_groups)) {
188 if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
189 unsigned long new_longs = mc_groups_longs +
190 BITS_TO_LONGS(n_groups);
191 size_t nlen = new_longs * sizeof(unsigned long);
193 if (mc_groups == &mc_group_start) {
194 new_groups = kzalloc(nlen, GFP_KERNEL);
197 mc_groups = new_groups;
198 *mc_groups = mc_group_start;
200 new_groups = krealloc(mc_groups, nlen,
204 mc_groups = new_groups;
205 for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
206 mc_groups[mc_groups_longs + i] = 0;
208 mc_groups_longs = new_longs;
212 for (i = id; i < id + n_groups; i++)
213 set_bit(i, mc_groups);
218 static struct genl_family genl_ctrl;
220 static int genl_validate_assign_mc_groups(struct genl_family *family)
223 int n_groups = family->n_mcgrps;
225 bool groups_allocated = false;
230 for (i = 0; i < n_groups; i++) {
231 const struct genl_multicast_group *grp = &family->mcgrps[i];
233 if (WARN_ON(grp->name[0] == '\0'))
235 if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
239 /* special-case our own group and hacks */
240 if (family == &genl_ctrl) {
241 first_id = GENL_ID_CTRL;
242 BUG_ON(n_groups != 1);
243 } else if (strcmp(family->name, "NET_DM") == 0) {
245 BUG_ON(n_groups != 1);
246 } else if (family->id == GENL_ID_VFS_DQUOT) {
247 first_id = GENL_ID_VFS_DQUOT;
248 BUG_ON(n_groups != 1);
249 } else if (family->id == GENL_ID_PMCRAID) {
250 first_id = GENL_ID_PMCRAID;
251 BUG_ON(n_groups != 1);
253 groups_allocated = true;
254 err = genl_allocate_reserve_groups(n_groups, &first_id);
259 family->mcgrp_offset = first_id;
261 /* if still initializing, can't and don't need to to realloc bitmaps */
262 if (!init_net.genl_sock)
265 if (family->netnsok) {
268 netlink_table_grab();
270 for_each_net_rcu(net) {
271 err = __netlink_change_ngroups(net->genl_sock,
272 mc_groups_longs * BITS_PER_LONG);
275 * No need to roll back, can only fail if
276 * memory allocation fails and then the
277 * number of _possible_ groups has been
278 * increased on some sockets which is ok.
284 netlink_table_ungrab();
286 err = netlink_change_ngroups(init_net.genl_sock,
287 mc_groups_longs * BITS_PER_LONG);
290 if (groups_allocated && err) {
291 for (i = 0; i < family->n_mcgrps; i++)
292 clear_bit(family->mcgrp_offset + i, mc_groups);
298 static void genl_unregister_mc_groups(struct genl_family *family)
303 netlink_table_grab();
305 for_each_net_rcu(net) {
306 for (i = 0; i < family->n_mcgrps; i++)
307 __netlink_clear_multicast_users(
308 net->genl_sock, family->mcgrp_offset + i);
311 netlink_table_ungrab();
313 for (i = 0; i < family->n_mcgrps; i++) {
314 int grp_id = family->mcgrp_offset + i;
317 clear_bit(grp_id, mc_groups);
318 genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
319 &family->mcgrps[i], grp_id);
323 static int genl_validate_ops(const struct genl_family *family)
325 const struct genl_ops *ops = family->ops;
326 unsigned int n_ops = family->n_ops;
329 if (WARN_ON(n_ops && !ops))
335 for (i = 0; i < n_ops; i++) {
336 if (ops[i].dumpit == NULL && ops[i].doit == NULL)
338 for (j = i + 1; j < n_ops; j++)
339 if (ops[i].cmd == ops[j].cmd)
347 * __genl_register_family - register a generic netlink family
348 * @family: generic netlink family
350 * Registers the specified family after validating it first. Only one
351 * family may be registered with the same family name or identifier.
352 * The family id may equal GENL_ID_GENERATE causing an unique id to
353 * be automatically generated and assigned.
355 * The family's ops array must already be assigned, you can use the
356 * genl_register_family_with_ops() helper function.
358 * Return 0 on success or a negative error code.
360 int __genl_register_family(struct genl_family *family)
362 int err = -EINVAL, i;
364 if (family->id && family->id < GENL_MIN_ID)
367 if (family->id > GENL_MAX_ID)
370 err = genl_validate_ops(family);
376 if (genl_family_find_byname(family->name)) {
381 if (family->id == GENL_ID_GENERATE) {
382 u16 newid = genl_generate_id();
390 } else if (genl_family_find_byid(family->id)) {
395 if (family->maxattr && !family->parallel_ops) {
396 family->attrbuf = kmalloc((family->maxattr+1) *
397 sizeof(struct nlattr *), GFP_KERNEL);
398 if (family->attrbuf == NULL) {
403 family->attrbuf = NULL;
405 err = genl_validate_assign_mc_groups(family);
409 list_add_tail(&family->family_list, genl_family_chain(family->id));
412 /* send all events */
413 genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
414 for (i = 0; i < family->n_mcgrps; i++)
415 genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
416 &family->mcgrps[i], family->mcgrp_offset + i);
425 EXPORT_SYMBOL(__genl_register_family);
428 * genl_unregister_family - unregister generic netlink family
429 * @family: generic netlink family
431 * Unregisters the specified family.
433 * Returns 0 on success or a negative error code.
435 int genl_unregister_family(struct genl_family *family)
437 struct genl_family *rc;
441 list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
442 if (family->id != rc->id || strcmp(rc->name, family->name))
445 genl_unregister_mc_groups(family);
447 list_del(&rc->family_list);
450 wait_event(genl_sk_destructing_waitq,
451 atomic_read(&genl_sk_destructing_cnt) == 0);
454 kfree(family->attrbuf);
455 genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
463 EXPORT_SYMBOL(genl_unregister_family);
466 * genlmsg_put - Add generic netlink header to netlink message
467 * @skb: socket buffer holding the message
468 * @portid: netlink portid the message is addressed to
469 * @seq: sequence number (usually the one of the sender)
470 * @family: generic netlink family
471 * @flags: netlink message flags
472 * @cmd: generic netlink command
474 * Returns pointer to user specific header
476 void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
477 struct genl_family *family, int flags, u8 cmd)
479 struct nlmsghdr *nlh;
480 struct genlmsghdr *hdr;
482 nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
483 family->hdrsize, flags);
487 hdr = nlmsg_data(nlh);
489 hdr->version = family->version;
492 return (char *) hdr + GENL_HDRLEN;
494 EXPORT_SYMBOL(genlmsg_put);
496 static int genl_lock_start(struct netlink_callback *cb)
498 /* our ops are always const - netlink API doesn't propagate that */
499 const struct genl_ops *ops = cb->data;
510 static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
512 /* our ops are always const - netlink API doesn't propagate that */
513 const struct genl_ops *ops = cb->data;
517 rc = ops->dumpit(skb, cb);
522 static int genl_lock_done(struct netlink_callback *cb)
524 /* our ops are always const - netlink API doesn't propagate that */
525 const struct genl_ops *ops = cb->data;
536 static int genl_family_rcv_msg(struct genl_family *family,
538 struct nlmsghdr *nlh)
540 const struct genl_ops *ops;
541 struct net *net = sock_net(skb->sk);
542 struct genl_info info;
543 struct genlmsghdr *hdr = nlmsg_data(nlh);
544 struct nlattr **attrbuf;
547 /* this family doesn't exist in this netns */
548 if (!family->netnsok && !net_eq(net, &init_net))
551 hdrlen = GENL_HDRLEN + family->hdrsize;
552 if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
555 ops = genl_get_cmd(hdr->cmd, family);
559 if ((ops->flags & GENL_ADMIN_PERM) &&
560 !netlink_capable(skb, CAP_NET_ADMIN))
563 if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
564 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
567 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
570 if (ops->dumpit == NULL)
573 if (!family->parallel_ops) {
574 struct netlink_dump_control c = {
575 .module = family->module,
576 /* we have const, but the netlink API doesn't */
578 .start = genl_lock_start,
579 .dump = genl_lock_dumpit,
580 .done = genl_lock_done,
584 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
588 struct netlink_dump_control c = {
589 .module = family->module,
595 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
601 if (ops->doit == NULL)
604 if (family->maxattr && family->parallel_ops) {
605 attrbuf = kmalloc((family->maxattr+1) *
606 sizeof(struct nlattr *), GFP_KERNEL);
610 attrbuf = family->attrbuf;
613 err = nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
619 info.snd_seq = nlh->nlmsg_seq;
620 info.snd_portid = NETLINK_CB(skb).portid;
622 info.genlhdr = nlmsg_data(nlh);
623 info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
624 info.attrs = attrbuf;
625 genl_info_net_set(&info, net);
626 memset(&info.user_ptr, 0, sizeof(info.user_ptr));
628 if (family->pre_doit) {
629 err = family->pre_doit(ops, skb, &info);
634 err = ops->doit(skb, &info);
636 if (family->post_doit)
637 family->post_doit(ops, skb, &info);
640 if (family->parallel_ops)
646 static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
648 struct genl_family *family;
651 family = genl_family_find_byid(nlh->nlmsg_type);
655 if (!family->parallel_ops)
658 err = genl_family_rcv_msg(family, skb, nlh);
660 if (!family->parallel_ops)
666 static void genl_rcv(struct sk_buff *skb)
669 netlink_rcv_skb(skb, &genl_rcv_msg);
/**************************************************************************
 * Controller
 **************************************************************************/
677 static struct genl_family genl_ctrl = {
681 .maxattr = CTRL_ATTR_MAX,
685 static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
686 u32 flags, struct sk_buff *skb, u8 cmd)
690 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
694 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
695 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
696 nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
697 nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
698 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
699 goto nla_put_failure;
702 struct nlattr *nla_ops;
705 nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
707 goto nla_put_failure;
709 for (i = 0; i < family->n_ops; i++) {
711 const struct genl_ops *ops = &family->ops[i];
712 u32 op_flags = ops->flags;
715 op_flags |= GENL_CMD_CAP_DUMP;
717 op_flags |= GENL_CMD_CAP_DO;
719 op_flags |= GENL_CMD_CAP_HASPOL;
721 nest = nla_nest_start(skb, i + 1);
723 goto nla_put_failure;
725 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
726 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
727 goto nla_put_failure;
729 nla_nest_end(skb, nest);
732 nla_nest_end(skb, nla_ops);
735 if (family->n_mcgrps) {
736 struct nlattr *nla_grps;
739 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
740 if (nla_grps == NULL)
741 goto nla_put_failure;
743 for (i = 0; i < family->n_mcgrps; i++) {
745 const struct genl_multicast_group *grp;
747 grp = &family->mcgrps[i];
749 nest = nla_nest_start(skb, i + 1);
751 goto nla_put_failure;
753 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
754 family->mcgrp_offset + i) ||
755 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
757 goto nla_put_failure;
759 nla_nest_end(skb, nest);
761 nla_nest_end(skb, nla_grps);
764 genlmsg_end(skb, hdr);
768 genlmsg_cancel(skb, hdr);
772 static int ctrl_fill_mcgrp_info(struct genl_family *family,
773 const struct genl_multicast_group *grp,
774 int grp_id, u32 portid, u32 seq, u32 flags,
775 struct sk_buff *skb, u8 cmd)
778 struct nlattr *nla_grps;
781 hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
785 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
786 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
787 goto nla_put_failure;
789 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
790 if (nla_grps == NULL)
791 goto nla_put_failure;
793 nest = nla_nest_start(skb, 1);
795 goto nla_put_failure;
797 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
798 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
800 goto nla_put_failure;
802 nla_nest_end(skb, nest);
803 nla_nest_end(skb, nla_grps);
805 genlmsg_end(skb, hdr);
809 genlmsg_cancel(skb, hdr);
813 static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
817 struct genl_family *rt;
818 struct net *net = sock_net(skb->sk);
819 int chains_to_skip = cb->args[0];
820 int fams_to_skip = cb->args[1];
822 for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
824 list_for_each_entry(rt, genl_family_chain(i), family_list) {
825 if (!rt->netnsok && !net_eq(net, &init_net))
827 if (++n < fams_to_skip)
829 if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
830 cb->nlh->nlmsg_seq, NLM_F_MULTI,
831 skb, CTRL_CMD_NEWFAMILY) < 0)
845 static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
846 u32 portid, int seq, u8 cmd)
851 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
853 return ERR_PTR(-ENOBUFS);
855 err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
864 static struct sk_buff *
865 ctrl_build_mcgrp_msg(struct genl_family *family,
866 const struct genl_multicast_group *grp,
867 int grp_id, u32 portid, int seq, u8 cmd)
872 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
874 return ERR_PTR(-ENOBUFS);
876 err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
886 static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
887 [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
888 [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
889 .len = GENL_NAMSIZ - 1 },
892 static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
895 struct genl_family *res = NULL;
898 if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
899 u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
900 res = genl_family_find_byid(id);
904 if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
907 name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
908 res = genl_family_find_byname(name);
909 #ifdef CONFIG_MODULES
913 request_module("net-pf-%d-proto-%d-family-%s",
914 PF_NETLINK, NETLINK_GENERIC, name);
917 res = genl_family_find_byname(name);
926 if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
927 /* family doesn't exist here */
931 msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
936 return genlmsg_reply(msg, info);
939 static int genl_ctrl_event(int event, struct genl_family *family,
940 const struct genl_multicast_group *grp,
945 /* genl is still initialising */
946 if (!init_net.genl_sock)
950 case CTRL_CMD_NEWFAMILY:
951 case CTRL_CMD_DELFAMILY:
953 msg = ctrl_build_family_msg(family, 0, 0, event);
955 case CTRL_CMD_NEWMCAST_GRP:
956 case CTRL_CMD_DELMCAST_GRP:
958 msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
967 if (!family->netnsok) {
968 genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
972 genlmsg_multicast_allns(&genl_ctrl, msg, 0,
980 static const struct genl_ops genl_ctrl_ops[] = {
982 .cmd = CTRL_CMD_GETFAMILY,
983 .doit = ctrl_getfamily,
984 .dumpit = ctrl_dumpfamily,
985 .policy = ctrl_policy,
989 static const struct genl_multicast_group genl_ctrl_groups[] = {
990 { .name = "notify", },
993 static int genl_bind(struct net *net, int group)
995 int i, err = -ENOENT;
998 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
999 struct genl_family *f;
1001 list_for_each_entry(f, genl_family_chain(i), family_list) {
1002 if (group >= f->mcgrp_offset &&
1003 group < f->mcgrp_offset + f->n_mcgrps) {
1004 int fam_grp = group - f->mcgrp_offset;
1006 if (!f->netnsok && net != &init_net)
1008 else if (f->mcast_bind)
1009 err = f->mcast_bind(net, fam_grp);
1021 static void genl_unbind(struct net *net, int group)
1025 down_read(&cb_lock);
1026 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
1027 struct genl_family *f;
1029 list_for_each_entry(f, genl_family_chain(i), family_list) {
1030 if (group >= f->mcgrp_offset &&
1031 group < f->mcgrp_offset + f->n_mcgrps) {
1032 int fam_grp = group - f->mcgrp_offset;
1034 if (f->mcast_unbind)
1035 f->mcast_unbind(net, fam_grp);
1043 static int __net_init genl_pernet_init(struct net *net)
1045 struct netlink_kernel_cfg cfg = {
1047 .flags = NL_CFG_F_NONROOT_RECV,
1049 .unbind = genl_unbind,
1052 /* we'll bump the group number right afterwards */
1053 net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
1055 if (!net->genl_sock && net_eq(net, &init_net))
1056 panic("GENL: Cannot initialize generic netlink\n");
1058 if (!net->genl_sock)
1064 static void __net_exit genl_pernet_exit(struct net *net)
1066 netlink_kernel_release(net->genl_sock);
1067 net->genl_sock = NULL;
1070 static struct pernet_operations genl_pernet_ops = {
1071 .init = genl_pernet_init,
1072 .exit = genl_pernet_exit,
1075 static int __init genl_init(void)
1079 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
1080 INIT_LIST_HEAD(&family_ht[i]);
1082 err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
1087 err = register_pernet_subsys(&genl_pernet_ops);
1094 panic("GENL: Cannot register controller: %d\n", err);
1097 subsys_initcall(genl_init);
1099 static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
1102 struct sk_buff *tmp;
1103 struct net *net, *prev = NULL;
1106 for_each_net_rcu(net) {
1108 tmp = skb_clone(skb, flags);
1113 err = nlmsg_multicast(prev->genl_sock, tmp,
1114 portid, group, flags);
1122 return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
1128 int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
1129 u32 portid, unsigned int group, gfp_t flags)
1131 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1133 group = family->mcgrp_offset + group;
1134 return genlmsg_mcast(skb, portid, group, flags);
1136 EXPORT_SYMBOL(genlmsg_multicast_allns);
1138 void genl_notify(struct genl_family *family, struct sk_buff *skb,
1139 struct genl_info *info, u32 group, gfp_t flags)
1141 struct net *net = genl_info_net(info);
1142 struct sock *sk = net->genl_sock;
1146 report = nlmsg_report(info->nlhdr);
1148 if (WARN_ON_ONCE(group >= family->n_mcgrps))
1150 group = family->mcgrp_offset + group;
1151 nlmsg_notify(sk, skb, info->snd_portid, group, report, flags);
1153 EXPORT_SYMBOL(genl_notify);