 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
	rtnl_dumpit_func	dumpit;
	rtnl_calcit_func	calcit;

static DEFINE_MUTEX(rtnl_mutex);

	mutex_lock(&rtnl_mutex);
EXPORT_SYMBOL(rtnl_lock);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
EXPORT_SYMBOL(rtnl_kfree_skbs);

void __rtnl_unlock(void)
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	mutex_unlock(&rtnl_mutex);
		struct sk_buff *next = head->next;

void rtnl_unlock(void)
	/* This fellow will unlock it for us. */
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
	return mutex_trylock(&rtnl_mutex);
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
	return mutex_is_locked(&rtnl_mutex);
EXPORT_SYMBOL(rtnl_is_locked);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
	return lockdep_is_held(&rtnl_mutex);
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
	int msgindex = msgtype - RTM_BASE;

	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];

	if (tab == NULL || tab[msgindex].doit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].doit;

static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];

	if (tab == NULL || tab[msgindex].dumpit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].dumpit;

static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];

	if (tab == NULL || tab[msgindex].calcit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].calcit;

 * __rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @calcit: Function pointer to calc size of dump message
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 *
 * Returns 0 on success or a negative error code.
int __rtnl_register(int protocol, int msgtype,
		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		    rtnl_calcit_func calcit)
	struct rtnl_link *tab;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	tab = rtnl_msg_handlers[protocol];
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
		rtnl_msg_handlers[protocol] = tab;

		tab[msgindex].doit = doit;
		tab[msgindex].dumpit = dumpit;
		tab[msgindex].calcit = calcit;
EXPORT_SYMBOL_GPL(__rtnl_register);

 * rtnl_register - Register a rtnetlink message type
 *
 * Identical to __rtnl_register() but panics on failure. This is useful
 * as failure of this function is very unlikely, it can only happen due
 * to lack of memory when allocating the chain to store all message
 * handlers for a protocol. Meant for use in init functions where lack
 * of memory implies no sense in continuing.
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   rtnl_calcit_func calcit)
	if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0)
		panic("Unable to register rtnetlink message handler, "
		      "protocol = %d, message type = %d\n",
EXPORT_SYMBOL_GPL(rtnl_register);
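
/*
 * Usage sketch (illustrative only, not part of this file): a protocol
 * module typically wires up its message handlers from an __init function.
 * The foo_* callbacks below are hypothetical placeholders, and the choice
 * of message types is purely for illustration.
 *
 *	static int __init foo_rtnetlink_init(void)
 *	{
 *		int err;
 *
 *		// Fallible variant: propagate the error (e.g. -ENOMEM).
 *		err = __rtnl_register(PF_UNSPEC, RTM_GETNEIGH,
 *				      NULL, foo_dump_neigh, NULL);
 *		if (err < 0)
 *			return err;
 *
 *		// Infallible variant used from core init paths: panics
 *		// instead of returning an error.
 *		rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, foo_new_neigh,
 *			      NULL, NULL);
 *		return 0;
 *	}
 */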
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
int rtnl_unregister(int protocol, int msgtype)
	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	if (rtnl_msg_handlers[protocol] == NULL)

	rtnl_msg_handlers[protocol][msgindex].doit = NULL;
	rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
	rtnl_msg_handlers[protocol][msgindex].calcit = NULL;
EXPORT_SYMBOL_GPL(rtnl_unregister);
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
void rtnl_unregister_all(int protocol)
	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	kfree(rtnl_msg_handlers[protocol]);
	rtnl_msg_handlers[protocol] = NULL;
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))

 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
int __rtnl_link_register(struct rtnl_link_ops *ops)
	if (rtnl_link_ops_get(ops->kind))

	/* The check for setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	if (ops->setup && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
EXPORT_SYMBOL_GPL(__rtnl_link_register);

 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
int rtnl_link_register(struct rtnl_link_ops *ops)
	err = __rtnl_link_register(ops);
EXPORT_SYMBOL_GPL(rtnl_link_register);
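
/*
 * Usage sketch (illustrative only): a virtual-device driver exposes a
 * struct rtnl_link_ops so that "ip link add ... type foo" can create its
 * devices. The "foo" kind, foo_setup() and struct foo_priv are
 * hypothetical names. Because .setup is provided and .dellink is not,
 * __rtnl_link_register() above fills in unregister_netdevice_queue as the
 * default dellink handler.
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind		= "foo",
 *		.priv_size	= sizeof(struct foo_priv),
 *		.setup		= foo_setup,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return rtnl_link_register(&foo_link_ops);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		rtnl_link_unregister(&foo_link_ops);
 *	}
 */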
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);

 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
		__rtnl_kill_links(net, ops);

	list_del(&ops->list);
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
static void rtnl_lock_unregistering_all(void)
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
		unregistering = false;
			if (net->dev_unreg_count > 0) {
				unregistering = true;

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	remove_wait_queue(&netdev_unregistering_wq, &wait);

 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
void rtnl_link_unregister(struct rtnl_link_ops *ops)
	/* Close the race with cleanup_net() */
	mutex_lock(&net_mutex);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	mutex_unlock(&net_mutex);
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
	/* IFLA_INFO_SLAVE_DATA + nested data */
	return nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

static size_t rtnl_link_get_size(const struct net_device *dev)
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */

		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
	const struct rtnl_af_ops *ops;

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)

 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 *
 * Returns 0 on success or a negative error code.
void rtnl_af_register(struct rtnl_af_ops *ops)
	list_add_tail(&ops->list, &rtnl_af_ops);
EXPORT_SYMBOL_GPL(rtnl_af_register);
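
/*
 * Usage sketch (illustrative only): an address family that wants to emit
 * and accept per-link attributes under IFLA_AF_SPEC registers a
 * struct rtnl_af_ops. The foo_* callbacks are hypothetical placeholders;
 * the in-tree users are the IPv4 and IPv6 code.
 *
 *	static struct rtnl_af_ops foo_af_ops __read_mostly = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = foo_fill_link_af,
 *		.get_link_af_size = foo_get_link_af_size,
 *		.validate_link_af = foo_validate_link_af,
 *		.set_link_af	  = foo_set_link_af,
 *	};
 *
 *	rtnl_af_register(&foo_af_ops);
 */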
 * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
void __rtnl_af_unregister(struct rtnl_af_ops *ops)
	list_del(&ops->list);
EXPORT_SYMBOL_GPL(__rtnl_af_unregister);

 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
void rtnl_af_unregister(struct rtnl_af_ops *ops)
	__rtnl_af_unregister(ops);
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
	struct rtnl_af_ops *af_ops;

	size = nla_total_size(sizeof(struct nlattr));

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);

static bool rtnl_have_link_slave_info(const struct net_device *dev)
	struct net_device *master_dev;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (master_dev && master_dev->rtnl_link_ops)

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	ops = master_dev->rtnl_link_ops;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
		err = ops->fill_slave_info(skb, master_dev, dev);
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
	if (ops->fill_info) {
		data = nla_nest_start(skb, IFLA_INFO_DATA);
		err = ops->fill_info(skb, dev);
			goto err_cancel_data;
		nla_nest_end(skb, data);

	nla_nest_cancel(skb, data);

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
	struct nlattr *linkinfo;

	linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)

	err = rtnl_link_info_fill(skb, dev);
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);

	nla_nest_cancel(skb, linkinfo);

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
	struct sock *rtnl = net->rtnl;

	NETLINK_CB(skb).dst_group = group;
		refcount_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
	struct sock *rtnl = net->rtnl;

		report = nlmsg_report(nlh);

	nlmsg_notify(rtnl, skb, pid, group, report, flags);
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
	mx = nla_nest_start(skb, RTA_METRICS);

	for (i = 0; i < RTAX_MAX; i++) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;

		nla_nest_cancel(skb, mx);

	return nla_nest_end(skb, mx);

	nla_nest_cancel(skb, mx);
EXPORT_SYMBOL(rtnetlink_put_metrics);

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
	struct rta_cacheinfo ci = {
		.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
		.rta_used = dst->__use,
		.rta_clntref = atomic_read(&(dst->__refcnt)),

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;

	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
static void set_operstate(struct net_device *dev, unsigned char transition)
	unsigned char operstate = dev->operstate;

	switch (transition) {
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_UNKNOWN) &&
			operstate = IF_OPER_UP;

	case IF_OPER_DORMANT:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_DORMANT;

	if (dev->operstate != operstate) {
		write_lock_bh(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;

static inline int rtnl_vfinfo_size(const struct net_device *dev,
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);

			nla_total_size(sizeof(struct ifla_vf_mac)) +
			nla_total_size(sizeof(struct ifla_vf_vlan)) +
			nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			nla_total_size(MAX_VLAN_LIST_LEN *
				       sizeof(struct ifla_vf_vlan_info)) +
			nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			nla_total_size(sizeof(struct ifla_vf_rate)) +
			nla_total_size(sizeof(struct ifla_vf_link_state)) +
			nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			nla_total_size(0) + /* nest IFLA_VF_STATS */
			/* IFLA_VF_STATS_RX_PACKETS */
			nla_total_size_64bit(sizeof(__u64)) +
			/* IFLA_VF_STATS_TX_PACKETS */
			nla_total_size_64bit(sizeof(__u64)) +
			/* IFLA_VF_STATS_RX_BYTES */
			nla_total_size_64bit(sizeof(__u64)) +
			/* IFLA_VF_STATS_TX_BYTES */
			nla_total_size_64bit(sizeof(__u64)) +
			/* IFLA_VF_STATS_BROADCAST */
			nla_total_size_64bit(sizeof(__u64)) +
			/* IFLA_VF_STATS_MULTICAST */
			nla_total_size_64bit(sizeof(__u64)) +
			nla_total_size(sizeof(struct ifla_vf_trust)));

static size_t rtnl_port_size(const struct net_device *dev,
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))

	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
		return port_self_size;

static size_t rtnl_xdp_size(void)
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4);	/* XDP_PROG_ID */

static noinline size_t if_nlmsg_size(const struct net_device *dev,
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4) /* IFLA_EVENT */
	       + nla_total_size(1); /* IFLA_PROTO_DOWN */

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
	struct nlattr *vf_ports;
	struct nlattr *vf_port;

	vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
			nla_nest_cancel(skb, vf_port);
		nla_nest_end(skb, vf_port);

	nla_nest_end(skb, vf_ports);

	nla_nest_cancel(skb, vf_ports);

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
	struct nlattr *port_self;

	port_self = nla_nest_start(skb, IFLA_PORT_SELF);

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;

	nla_nest_end(skb, port_self);

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))

	err = rtnl_port_self_fill(skb, dev);

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
		if (err == -EOPNOTSUPP)

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
	char name[IFNAMSIZ];

	err = dev_get_phys_port_name(dev, name, sizeof(name));
		if (err == -EOPNOTSUPP)

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,

	err = switchdev_port_attr_get(dev, &attr);
		if (err == -EOPNOTSUPP)

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));

	copy_rtnl_link_stats(nla_data(attr), sp);

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       struct nlattr *vfinfo)
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_info ivi;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query. Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	ivi.rss_query_en = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));

	vf_rss_query_en.vf =
		vf_trust.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start(skb, IFLA_VF_INFO);
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;
	vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	nla_nest_end(skb, vfvlanlist);
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
	vfstats = nla_nest_start(skb, IFLA_VF_STATS);
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);

	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))

static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
	const struct net_device_ops *ops = dev->netdev_ops;
	const struct bpf_prog *generic_xdp_prog;

	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (generic_xdp_prog) {
		*prog_id = generic_xdp_prog->aux->id;
		return XDP_ATTACHED_SKB;
		return XDP_ATTACHED_NONE;

	return __dev_xdp_attached(dev, ops->ndo_xdp, prog_id);

static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
	xdp = nla_nest_start(skb, IFLA_XDP);

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
			 rtnl_xdp_attached_mode(dev, &prog_id));
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);

	nla_nest_end(skb, xdp);

	nla_nest_cancel(skb, xdp);

static u32 rtnl_get_event(unsigned long event)
	u32 rtnl_event_type = IFLA_EVENT_NONE;

		rtnl_event_type = IFLA_EVENT_REBOOT;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;

	return rtnl_event_type;

static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct nlattr *af_spec;
	struct rtnl_af_ops *af_ops;
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
	     nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
	     nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_changes)) ||
	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
	    nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
	    ext_filter_mask & RTEXT_FILTER_VF) {
		struct nlattr *vfinfo;
		int num_vfs = dev_num_vf(dev->dev.parent);

		vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
			goto nla_put_failure;
		for (i = 0; i < num_vfs; i++) {
			if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
				goto nla_put_failure;
		nla_nest_end(skb, vfinfo);

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;

	if (dev->rtnl_link_ops &&
	    dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(dev_net(dev), link_net);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				goto nla_put_failure;

	if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
		goto nla_put_failure;

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->fill_link_af) {
			if (!(af = nla_nest_start(skb, af_ops->family)))
				goto nla_put_failure;

			err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
			 * Caller may return ENODATA to indicate that there
			 * was no data to be dumped. This is not an error, it
			 * means we should trim the attribute header and
			if (err == -ENODATA)
				nla_nest_cancel(skb, af);
				goto nla_put_failure;

			nla_nest_end(skb, af);

	nla_nest_end(skb, af_spec);

	nlmsg_end(skb, nlh);

	nlmsg_cancel(skb, nlh);

static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	[IFLA_IFALIAS]		= { .type = NLA_STRING, .len = IFALIASZ-1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },	/* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },

static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },

static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
	[IFLA_VF_VLAN_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },

static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
	[IFLA_PORT_VF]		= { .type = NLA_U32 },
	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
				    .len = PORT_PROFILE_MAX },
	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
				      .len = PORT_UUID_MAX },
	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
				    .len = PORT_UUID_MAX },
	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },

	/* Unused, but we need to keep it here since user space could
	 * fill it. It's also broken with regard to NLA_BINARY use in
	 * combination with structs.
	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
				    .len = sizeof(struct ifla_port_vsi) },

static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
	[IFLA_XDP_FD]		= { .type = NLA_S32 },
	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },

static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
	const struct rtnl_link_ops *ops = NULL;
	struct nlattr *linfo[IFLA_INFO_MAX + 1];

	if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
			     ifla_info_policy, NULL) < 0)

	if (linfo[IFLA_INFO_KIND]) {
		char kind[MODULE_NAME_LEN];

		nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);

static bool link_master_filtered(struct net_device *dev, int master_idx)
	struct net_device *master;

	master = netdev_master_upper_dev_get(dev);
	if (!master || master->ifindex != master_idx)

static bool link_kind_filtered(const struct net_device *dev,
			       const struct rtnl_link_ops *kind_ops)
	if (kind_ops && dev->rtnl_link_ops != kind_ops)

static bool link_dump_filtered(struct net_device *dev,
			       const struct rtnl_link_ops *kind_ops)
	if (link_master_filtered(dev, master_idx) ||
	    link_kind_filtered(dev, kind_ops))

static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct hlist_head *head;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	const struct rtnl_link_ops *kind_ops = NULL;
	unsigned int flags = NLM_F_MULTI;

	s_idx = cb->args[1];

	cb->seq = net->dev_base_seq;

	/* A hack to preserve kernel<->userspace interface.
	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
	 * what iproute2 < v3.9.0 used.
	 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
	 * attribute, its netlink message is shorter than struct ifinfomsg.
	hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
			ifla_policy, NULL) >= 0) {
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

		if (tb[IFLA_MASTER])
			master_idx = nla_get_u32(tb[IFLA_MASTER]);

		if (tb[IFLA_LINKINFO])
			kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);

		if (master_idx || kind_ops)
			flags |= NLM_F_DUMP_FILTERED;

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (link_dump_filtered(dev, master_idx, kind_ops))
			err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
					       NETLINK_CB(cb->skb).portid,
					       cb->nlh->nlmsg_seq, 0,
					       ext_filter_mask, 0);
			if (likely(skb->len))
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));

int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
			struct netlink_ext_ack *exterr)
	return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr);
EXPORT_SYMBOL(rtnl_nla_parse_ifla);

struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
	/* Examine the link attributes and figure out which
	 * network namespace we are talking about.
	if (tb[IFLA_NET_NS_PID])
		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
	else if (tb[IFLA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
		net = get_net(src_net);
EXPORT_SYMBOL(rtnl_link_get_net);

static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
	if (tb[IFLA_ADDRESS] &&
	    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)

	if (tb[IFLA_BROADCAST] &&
	    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)

	if (tb[IFLA_AF_SPEC]) {
		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
				return -EAFNOSUPPORT;

			if (!af_ops->set_link_af)

			if (af_ops->validate_link_af) {
				err = af_ops->validate_link_af(dev, af);

static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
	const struct net_device_ops *ops = dev->netdev_ops;

	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);

static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
	if (dev->type != ARPHRD_INFINIBAND)

	return handle_infiniband_guid(dev, ivt, guid_type);

static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
	const struct net_device_ops *ops = dev->netdev_ops;

	if (tb[IFLA_VF_MAC]) {
		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);

		if (ops->ndo_set_vf_mac)
			err = ops->ndo_set_vf_mac(dev, ivm->vf,

	if (tb[IFLA_VF_VLAN]) {
		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);

		if (ops->ndo_set_vf_vlan)
			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
						   htons(ETH_P_8021Q));

	if (tb[IFLA_VF_VLAN_LIST]) {
		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
		struct nlattr *attr;

		if (!ops->ndo_set_vf_vlan)

		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
			if (len >= MAX_VLAN_LIST_LEN)
			ivvl[len] = nla_data(attr);

		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
					   ivvl[0]->qos, ivvl[0]->vlan_proto);

	if (tb[IFLA_VF_TX_RATE]) {
		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
		struct ifla_vf_info ivf;

		if (ops->ndo_get_vf_config)
			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);

		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,

	if (tb[IFLA_VF_RATE]) {
		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);

		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,

	if (tb[IFLA_VF_SPOOFCHK]) {
		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);

		if (ops->ndo_set_vf_spoofchk)
			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,

	if (tb[IFLA_VF_LINK_STATE]) {
		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);

		if (ops->ndo_set_vf_link_state)
			err = ops->ndo_set_vf_link_state(dev, ivl->vf,

	if (tb[IFLA_VF_RSS_QUERY_EN]) {
		struct ifla_vf_rss_query_en *ivrssq_en;

		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
		if (ops->ndo_set_vf_rss_query_en)
			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
							   ivrssq_en->setting);

	if (tb[IFLA_VF_TRUST]) {
		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);

		if (ops->ndo_set_vf_trust)
			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);

	if (tb[IFLA_VF_IB_NODE_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);

		if (!ops->ndo_set_vf_guid)

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);

	if (tb[IFLA_VF_IB_PORT_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);

		if (!ops->ndo_set_vf_guid)

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);

static int do_set_master(struct net_device *dev, int ifindex)
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
	const struct net_device_ops *ops;

		if (upper_dev->ifindex == ifindex)
		ops = upper_dev->netdev_ops;
		if (ops->ndo_del_slave) {
			err = ops->ndo_del_slave(upper_dev, dev);

		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
		ops = upper_dev->netdev_ops;
		if (ops->ndo_add_slave) {
			err = ops->ndo_add_slave(upper_dev, dev);

#define DO_SETLINK_MODIFIED	0x01
/* notify flag means notify + modified. */
#define DO_SETLINK_NOTIFY	0x03
static int do_setlink(const struct sk_buff *skb,
		      struct net_device *dev, struct ifinfomsg *ifm,
		      struct netlink_ext_ack *extack,
		      struct nlattr **tb, char *ifname, int status)
	const struct net_device_ops *ops = dev->netdev_ops;

	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
		struct net *net = rtnl_link_get_net(dev_net(dev), tb);
		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
		err = dev_change_net_namespace(dev, net, ifname);
		status |= DO_SETLINK_MODIFIED;

		struct rtnl_link_ifmap *u_map;

		if (!ops->ndo_set_config) {
		if (!netif_device_present(dev)) {

		u_map = nla_data(tb[IFLA_MAP]);
		k_map.mem_start = (unsigned long) u_map->mem_start;
		k_map.mem_end = (unsigned long) u_map->mem_end;
		k_map.base_addr = (unsigned short) u_map->base_addr;
		k_map.irq = (unsigned char) u_map->irq;
		k_map.dma = (unsigned char) u_map->dma;
		k_map.port = (unsigned char) u_map->port;

		err = ops->ndo_set_config(dev, &k_map);
		status |= DO_SETLINK_NOTIFY;

	if (tb[IFLA_ADDRESS]) {
		struct sockaddr *sa;

		len = sizeof(sa_family_t) + dev->addr_len;
		sa = kmalloc(len, GFP_KERNEL);
		sa->sa_family = dev->type;
		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
		err = dev_set_mac_address(dev, sa);
		status |= DO_SETLINK_MODIFIED;

		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		status |= DO_SETLINK_MODIFIED;

	if (tb[IFLA_GROUP]) {
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
		status |= DO_SETLINK_NOTIFY;

	 * Interface selected by interface index but interface
	 * name provided implies that a name change has been
	if (ifm->ifi_index > 0 && ifname[0]) {
		err = dev_change_name(dev, ifname);
		status |= DO_SETLINK_MODIFIED;
	if (tb[IFLA_IFALIAS]) {
		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
				    nla_len(tb[IFLA_IFALIAS]));
		status |= DO_SETLINK_NOTIFY;

	if (tb[IFLA_BROADCAST]) {
		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);

	if (ifm->ifi_flags || ifm->ifi_change) {
		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));

	if (tb[IFLA_MASTER]) {
		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
		status |= DO_SETLINK_MODIFIED;

	if (tb[IFLA_CARRIER]) {
		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
		status |= DO_SETLINK_MODIFIED;

	if (tb[IFLA_TXQLEN]) {
		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
		unsigned int orig_len = dev->tx_queue_len;

		if (dev->tx_queue_len ^ value) {
			dev->tx_queue_len = value;
			err = call_netdevice_notifiers(
			      NETDEV_CHANGE_TX_QUEUE_LEN, dev);
			err = notifier_to_errno(err);
				dev->tx_queue_len = orig_len;

			status |= DO_SETLINK_NOTIFY;

	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));

	if (tb[IFLA_LINKMODE]) {
		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);

		write_lock_bh(&dev_base_lock);
		if (dev->link_mode ^ value)
			status |= DO_SETLINK_NOTIFY;
		dev->link_mode = value;
		write_unlock_bh(&dev_base_lock);

	if (tb[IFLA_VFINFO_LIST]) {
		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
		struct nlattr *attr;

		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
			err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
					       ifla_vf_policy, NULL);
			err = do_setvfinfo(dev, vfinfo);
			status |= DO_SETLINK_NOTIFY;

	if (tb[IFLA_VF_PORTS]) {
		struct nlattr *port[IFLA_PORT_MAX+1];
		struct nlattr *attr;

		if (!ops->ndo_set_vf_port)

		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
			if (nla_type(attr) != IFLA_VF_PORT ||
			    nla_len(attr) < NLA_HDRLEN) {
			err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
					       ifla_port_policy, NULL);
			if (!port[IFLA_PORT_VF]) {
			vf = nla_get_u32(port[IFLA_PORT_VF]);
			err = ops->ndo_set_vf_port(dev, vf, port);
			status |= DO_SETLINK_NOTIFY;

	if (tb[IFLA_PORT_SELF]) {
		struct nlattr *port[IFLA_PORT_MAX+1];

		err = nla_parse_nested(port, IFLA_PORT_MAX,
				       tb[IFLA_PORT_SELF], ifla_port_policy,
		if (ops->ndo_set_vf_port)
			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
		status |= DO_SETLINK_NOTIFY;

	if (tb[IFLA_AF_SPEC]) {
		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			if (!(af_ops = rtnl_af_lookup(nla_type(af))))

			err = af_ops->set_link_af(dev, af);

			status |= DO_SETLINK_NOTIFY;

	if (tb[IFLA_PROTO_DOWN]) {
		err = dev_change_proto_down(dev,
					    nla_get_u8(tb[IFLA_PROTO_DOWN]));
		status |= DO_SETLINK_NOTIFY;

		struct nlattr *xdp[IFLA_XDP_MAX + 1];

		err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
				       ifla_xdp_policy, NULL);

		if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {

		if (xdp[IFLA_XDP_FLAGS]) {
			xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
			if (xdp_flags & ~XDP_FLAGS_MASK) {
			if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {

		if (xdp[IFLA_XDP_FD]) {
			err = dev_change_xdp_fd(dev, extack,
						nla_get_s32(xdp[IFLA_XDP_FD]),
			status |= DO_SETLINK_NOTIFY;

	if (status & DO_SETLINK_MODIFIED) {
		if (status & DO_SETLINK_NOTIFY)
			netdev_state_change(dev);

			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",

static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *tb[IFLA_MAX+1];
	char ifname[IFNAMSIZ];

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);

	err = validate_linkmsg(dev, tb);

	err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);

static int rtnl_group_dellink(const struct net *net, int group)
	struct net_device *dev, *aux;
	LIST_HEAD(list_kill);
	for_each_netdev(net, dev) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			ops = dev->rtnl_link_ops;
			if (!ops || !ops->dellink)

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			ops = dev->rtnl_link_ops;
			ops->dellink(dev, &list_kill);

	unregister_netdevice_many(&list_kill);

int rtnl_delete_link(struct net_device *dev)
	const struct rtnl_link_ops *ops;
	LIST_HEAD(list_kill);

	ops = dev->rtnl_link_ops;
	if (!ops || !ops->dellink)

	ops->dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
EXPORT_SYMBOL_GPL(rtnl_delete_link);

static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else if (tb[IFLA_GROUP])
		return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));

	return rtnl_delete_link(dev);

int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
	unsigned int old_flags;

	old_flags = dev->flags;
	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));

	dev->rtnl_link_state = RTNL_LINK_INITIALIZED;

	__dev_notify_flags(dev, old_flags, ~0U);
EXPORT_SYMBOL(rtnl_configure_link);

struct net_device *rtnl_create_link(struct net *net,
	const char *ifname, unsigned char name_assign_type,
	const struct rtnl_link_ops *ops, struct nlattr *tb[])
	struct net_device *dev;
	unsigned int num_tx_queues = 1;
	unsigned int num_rx_queues = 1;

	if (tb[IFLA_NUM_TX_QUEUES])
		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
	else if (ops->get_num_tx_queues)
		num_tx_queues = ops->get_num_tx_queues();

	if (tb[IFLA_NUM_RX_QUEUES])
		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
	else if (ops->get_num_rx_queues)
		num_rx_queues = ops->get_num_rx_queues();

	dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
			       ops->setup, num_tx_queues, num_rx_queues);
		return ERR_PTR(-ENOMEM);

	dev_net_set(dev, net);
	dev->rtnl_link_ops = ops;
	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;

		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
	if (tb[IFLA_ADDRESS]) {
		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
		       nla_len(tb[IFLA_ADDRESS]));
		dev->addr_assign_type = NET_ADDR_SET;
	if (tb[IFLA_BROADCAST])
		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
		       nla_len(tb[IFLA_BROADCAST]));
	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
	if (tb[IFLA_LINKMODE])
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
EXPORT_SYMBOL(rtnl_create_link);

static int rtnl_group_changelink(const struct sk_buff *skb,
				 struct net *net, int group,
				 struct ifinfomsg *ifm,
				 struct netlink_ext_ack *extack,
	struct net_device *dev, *aux;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);

static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	const struct rtnl_link_ops *ops;
	const struct rtnl_link_ops *m_ops = NULL;
	struct net_device *dev;
	struct net_device *master_dev = NULL;
	struct ifinfomsg *ifm;
	char kind[MODULE_NAME_LEN];
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct nlattr *linkinfo[IFLA_INFO_MAX+1];
	unsigned char name_assign_type = NET_NAME_USER;
#ifdef CONFIG_MODULES

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
			dev = __dev_get_by_name(net, ifname);
2542 master_dev = netdev_master_upper_dev_get(dev);
2544 m_ops = master_dev->rtnl_link_ops;
2547 err = validate_linkmsg(dev, tb);
2551 if (tb[IFLA_LINKINFO]) {
2552 err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
2553 tb[IFLA_LINKINFO], ifla_info_policy,
2558 memset(linkinfo, 0, sizeof(linkinfo));
2560 if (linkinfo[IFLA_INFO_KIND]) {
2561 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
2562 ops = rtnl_link_ops_get(kind);
2569 struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
2570 struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
2571 struct nlattr **data = NULL;
2572 struct nlattr **slave_data = NULL;
2573 struct net *dest_net, *link_net = NULL;
2576 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
2577 err = nla_parse_nested(attr, ops->maxtype,
2578 linkinfo[IFLA_INFO_DATA],
2584 if (ops->validate) {
2585 err = ops->validate(tb, data, extack);
2592 if (m_ops->slave_maxtype &&
2593 linkinfo[IFLA_INFO_SLAVE_DATA]) {
2594 err = nla_parse_nested(slave_attr,
2595 m_ops->slave_maxtype,
2596 linkinfo[IFLA_INFO_SLAVE_DATA],
2597 m_ops->slave_policy,
2601 slave_data = slave_attr;
2603 if (m_ops->slave_validate) {
2604 err = m_ops->slave_validate(tb, slave_data,
2614 if (nlh->nlmsg_flags & NLM_F_EXCL)
2616 if (nlh->nlmsg_flags & NLM_F_REPLACE)
2619 if (linkinfo[IFLA_INFO_DATA]) {
2620 if (!ops || ops != dev->rtnl_link_ops ||
2624 err = ops->changelink(dev, tb, data, extack);
2627 status |= DO_SETLINK_NOTIFY;
2630 if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
2631 if (!m_ops || !m_ops->slave_changelink)
2634 err = m_ops->slave_changelink(master_dev, dev,
2639 status |= DO_SETLINK_NOTIFY;
2642 return do_setlink(skb, dev, ifm, extack, tb, ifname,
2646 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2647 if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
2648 return rtnl_group_changelink(skb, net,
2649 nla_get_u32(tb[IFLA_GROUP]),
2654 if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
2658 #ifdef CONFIG_MODULES
2661 request_module("rtnl-link-%s", kind);
2663 ops = rtnl_link_ops_get(kind);
2675 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
2676 name_assign_type = NET_NAME_ENUM;
2679 dest_net = rtnl_link_get_net(net, tb);
2680 if (IS_ERR(dest_net))
2681 return PTR_ERR(dest_net);
2684 if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
2687 if (tb[IFLA_LINK_NETNSID]) {
2688 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
2690 link_net = get_net_ns_by_id(dest_net, id);
2696 if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
2700 dev = rtnl_create_link(link_net ? : dest_net, ifname,
2701 name_assign_type, ops, tb);
2707 dev->ifindex = ifm->ifi_index;
2710 err = ops->newlink(link_net ? : net, dev, tb, data,
2712 /* Drivers should call free_netdev() in ->destructor
2713 * and unregister the device on failure after registration,
2714 * so that it can finally be freed in rtnl_unlock().
2717 /* If the device is not registered at all, free it now */
2718 if (dev->reg_state == NETREG_UNINITIALIZED)
2723 err = register_netdevice(dev);
2729 err = rtnl_configure_link(dev, ifm);
2731 goto out_unregister;
2733 err = dev_change_net_namespace(dev, dest_net, ifname);
2735 goto out_unregister;
2737 if (tb[IFLA_MASTER]) {
2738 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
2740 goto out_unregister;
2749 LIST_HEAD(list_kill);
2751 ops->dellink(dev, &list_kill);
2752 unregister_netdevice_many(&list_kill);
2754 unregister_netdevice(dev);
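/*
 * Illustrative sketch (not part of the original file): the RTM_NEWLINK
 * request handled by rtnl_newlink() above is typically built by userspace
 * roughly as follows (assuming libmnl; socket I/O and error handling are
 * omitted, and "dummy0"/"dummy" are just an example name and kind):
 *
 *	char buf[MNL_SOCKET_BUFFER_SIZE];
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	struct ifinfomsg *ifm;
 *	struct nlattr *linkinfo;
 *
 *	nlh->nlmsg_type  = RTM_NEWLINK;
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
 *	ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
 *	ifm->ifi_family = AF_UNSPEC;
 *	mnl_attr_put_str(nlh, IFLA_IFNAME, "dummy0");
 *	linkinfo = mnl_attr_nest_start(nlh, IFLA_LINKINFO);
 *	mnl_attr_put_str(nlh, IFLA_INFO_KIND, "dummy");
 *	mnl_attr_nest_end(nlh, linkinfo);
 */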
2760 static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2761 struct netlink_ext_ack *extack)
2763 struct net *net = sock_net(skb->sk);
2764 struct ifinfomsg *ifm;
2765 char ifname[IFNAMSIZ];
2766 struct nlattr *tb[IFLA_MAX+1];
2767 struct net_device *dev = NULL;
2768 struct sk_buff *nskb;
2770 u32 ext_filter_mask = 0;
2772 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
2776 if (tb[IFLA_IFNAME])
2777 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
2779 if (tb[IFLA_EXT_MASK])
2780 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
2782 ifm = nlmsg_data(nlh);
2783 if (ifm->ifi_index > 0)
2784 dev = __dev_get_by_index(net, ifm->ifi_index);
2785 else if (tb[IFLA_IFNAME])
2786 dev = __dev_get_by_name(net, ifname);
2793 nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
2797 err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
2798 nlh->nlmsg_seq, 0, 0, ext_filter_mask, 0);
2800 /* -EMSGSIZE implies BUG in if_nlmsg_size */
2801 WARN_ON(err == -EMSGSIZE);
2804 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
2809 static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
2811 struct net *net = sock_net(skb->sk);
2812 struct net_device *dev;
2813 struct nlattr *tb[IFLA_MAX+1];
2814 u32 ext_filter_mask = 0;
2815 u16 min_ifinfo_dump_size = 0;
2818 /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
2819 hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
2820 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
2822 if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
2823 if (tb[IFLA_EXT_MASK])
2824 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
2827 if (!ext_filter_mask)
2828 return NLMSG_GOODSIZE;
2830 /* traverse the list of net devices and compute the minimum
2831 * buffer size based upon the filter mask. */
2833 list_for_each_entry(dev, &net->dev_base_head, dev_list) {
2834 min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
2839 return nlmsg_total_size(min_ifinfo_dump_size);
2842 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
2845 int s_idx = cb->family;
2849 for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
2850 int type = cb->nlh->nlmsg_type-RTM_BASE;
2851 if (idx < s_idx || idx == PF_PACKET)
2853 if (rtnl_msg_handlers[idx] == NULL ||
2854 rtnl_msg_handlers[idx][type].dumpit == NULL)
2857 memset(&cb->args[0], 0, sizeof(cb->args));
2861 if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
2869 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
2870 unsigned int change,
2871 u32 event, gfp_t flags)
2873 struct net *net = dev_net(dev);
2874 struct sk_buff *skb;
2876 size_t if_info_size;
2878 skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
2882 err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0, event);
2884 /* -EMSGSIZE implies BUG in if_nlmsg_size() */
2885 WARN_ON(err == -EMSGSIZE);
2892 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
2896 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
2898 struct net *net = dev_net(dev);
2900 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
2903 static void rtmsg_ifinfo_event(int type, struct net_device *dev,
2904 unsigned int change, u32 event,
2907 struct sk_buff *skb;
2909 if (dev->reg_state != NETREG_REGISTERED)
2912 skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags);
2914 rtmsg_ifinfo_send(skb, dev, flags);
2917 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
2920 rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags);
2922 EXPORT_SYMBOL(rtmsg_ifinfo);
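/*
 * Illustrative sketch (not part of the original file): callers broadcast a
 * link change with rtmsg_ifinfo(); the change mask below is an arbitrary
 * example.
 *
 *	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL);
 */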
2924 static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
2925 struct net_device *dev,
2926 u8 *addr, u16 vid, u32 pid, u32 seq,
2927 int type, unsigned int flags,
2928 int nlflags, u16 ndm_state)
2930 struct nlmsghdr *nlh;
2933 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
2937 ndm = nlmsg_data(nlh);
2938 ndm->ndm_family = AF_BRIDGE;
2941 ndm->ndm_flags = flags;
2943 ndm->ndm_ifindex = dev->ifindex;
2944 ndm->ndm_state = ndm_state;
2946 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
2947 goto nla_put_failure;
2949 if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
2950 goto nla_put_failure;
2952 nlmsg_end(skb, nlh);
2956 nlmsg_cancel(skb, nlh);
2960 static inline size_t rtnl_fdb_nlmsg_size(void)
2962 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
2963 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
2964 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
2968 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
2971 struct net *net = dev_net(dev);
2972 struct sk_buff *skb;
2975 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
2979 err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
2980 0, 0, type, NTF_SELF, 0, ndm_state);
2986 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2989 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2993 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
2995 int ndo_dflt_fdb_add(struct ndmsg *ndm,
2996 struct nlattr *tb[],
2997 struct net_device *dev,
2998 const unsigned char *addr, u16 vid,
3003 /* If aging addresses are supported, the device will need to
3004 * implement its own handler for this. */
3006 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
3007 pr_info("%s: FDB only supports static addresses\n", dev->name);
3012 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
3016 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3017 err = dev_uc_add_excl(dev, addr);
3018 else if (is_multicast_ether_addr(addr))
3019 err = dev_mc_add_excl(dev, addr);
3021 /* Only return duplicate errors if NLM_F_EXCL is set */
3022 if (err == -EEXIST && !(flags & NLM_F_EXCL))
3027 EXPORT_SYMBOL(ndo_dflt_fdb_add);
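/*
 * Illustrative sketch (not part of the original file): a driver without a
 * hardware FDB can rely on the default helpers, either by leaving the ops
 * unset (rtnl_fdb_add()/rtnl_fdb_del() below fall back to them) or by
 * pointing the ops at them directly.  "example_netdev_ops" is hypothetical.
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_fdb_add	= ndo_dflt_fdb_add,
 *		.ndo_fdb_del	= ndo_dflt_fdb_del,
 *	};
 */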
3029 static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid)
3034 if (nla_len(vlan_attr) != sizeof(u16)) {
3035 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan\n");
3039 vid = nla_get_u16(vlan_attr);
3041 if (!vid || vid >= VLAN_VID_MASK) {
3042 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan id %d\n",
3051 static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
3052 struct netlink_ext_ack *extack)
3054 struct net *net = sock_net(skb->sk);
3056 struct nlattr *tb[NDA_MAX+1];
3057 struct net_device *dev;
3062 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3066 ndm = nlmsg_data(nlh);
3067 if (ndm->ndm_ifindex == 0) {
3068 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
3072 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3074 pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
3078 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3079 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
3083 addr = nla_data(tb[NDA_LLADDR]);
3085 err = fdb_vid_parse(tb[NDA_VLAN], &vid);
3091 /* Support fdb on the master device; this is the net/bridge default case */
3092 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3093 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3094 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3095 const struct net_device_ops *ops = br_dev->netdev_ops;
3097 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
3102 ndm->ndm_flags &= ~NTF_MASTER;
3105 /* Embedded bridge, macvlan, and any other device support */
3106 if ((ndm->ndm_flags & NTF_SELF)) {
3107 if (dev->netdev_ops->ndo_fdb_add)
3108 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
3112 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
3116 rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
3118 ndm->ndm_flags &= ~NTF_SELF;
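/*
 * Illustrative sketch (not part of the original file): the RTM_NEWNEIGH
 * request that rtnl_fdb_add() above handles (what "bridge fdb add" sends)
 * looks roughly like this when built with libmnl; "port_ifindex" and
 * "mac_addr" are placeholders and error handling is omitted.
 *
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	struct ndmsg *ndm;
 *
 *	nlh->nlmsg_type  = RTM_NEWNEIGH;
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;
 *	ndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));
 *	ndm->ndm_family  = PF_BRIDGE;
 *	ndm->ndm_ifindex = port_ifindex;
 *	ndm->ndm_state   = NUD_PERMANENT;
 *	ndm->ndm_flags   = NTF_SELF;
 *	mnl_attr_put(nlh, NDA_LLADDR, ETH_ALEN, mac_addr);
 */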
3126 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
3128 int ndo_dflt_fdb_del(struct ndmsg *ndm,
3129 struct nlattr *tb[],
3130 struct net_device *dev,
3131 const unsigned char *addr, u16 vid)
3135 /* If aging addresses are supported, the device will need to
3136 * implement its own handler for this. */
3138 if (!(ndm->ndm_state & NUD_PERMANENT)) {
3139 pr_info("%s: FDB only supports static addresses\n", dev->name);
3143 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3144 err = dev_uc_del(dev, addr);
3145 else if (is_multicast_ether_addr(addr))
3146 err = dev_mc_del(dev, addr);
3150 EXPORT_SYMBOL(ndo_dflt_fdb_del);
3152 static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
3153 struct netlink_ext_ack *extack)
3155 struct net *net = sock_net(skb->sk);
3157 struct nlattr *tb[NDA_MAX+1];
3158 struct net_device *dev;
3163 if (!netlink_capable(skb, CAP_NET_ADMIN))
3166 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
3170 ndm = nlmsg_data(nlh);
3171 if (ndm->ndm_ifindex == 0) {
3172 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
3176 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
3178 pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
3182 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
3183 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
3187 addr = nla_data(tb[NDA_LLADDR]);
3189 err = fdb_vid_parse(tb[NDA_VLAN], &vid);
3195 /* Support fdb on the master device; this is the net/bridge default case */
3196 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
3197 (dev->priv_flags & IFF_BRIDGE_PORT)) {
3198 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3199 const struct net_device_ops *ops = br_dev->netdev_ops;
3201 if (ops->ndo_fdb_del)
3202 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
3207 ndm->ndm_flags &= ~NTF_MASTER;
3210 /* Embedded bridge, macvlan, and any other device support */
3211 if (ndm->ndm_flags & NTF_SELF) {
3212 if (dev->netdev_ops->ndo_fdb_del)
3213 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
3216 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
3219 rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
3221 ndm->ndm_flags &= ~NTF_SELF;
3228 static int nlmsg_populate_fdb(struct sk_buff *skb,
3229 struct netlink_callback *cb,
3230 struct net_device *dev,
3232 struct netdev_hw_addr_list *list)
3234 struct netdev_hw_addr *ha;
3238 portid = NETLINK_CB(cb->skb).portid;
3239 seq = cb->nlh->nlmsg_seq;
3241 list_for_each_entry(ha, &list->list, list) {
3242 if (*idx < cb->args[2])
3245 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
3247 RTM_NEWNEIGH, NTF_SELF,
3248 NLM_F_MULTI, NUD_PERMANENT);
3258 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
3259 * @skb: socket buffer to store the message in
3262 * Default netdevice operation to dump the existing unicast address list.
3263 * Returns number of addresses from list put in skb.
3265 int ndo_dflt_fdb_dump(struct sk_buff *skb,
3266 struct netlink_callback *cb,
3267 struct net_device *dev,
3268 struct net_device *filter_dev,
3273 netif_addr_lock_bh(dev);
3274 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
3277 err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
3279 netif_addr_unlock_bh(dev);
3282 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
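/*
 * Illustrative sketch (not part of the original file): a driver whose FDB is
 * just the software unicast/multicast lists can forward its dump op to the
 * default helper above.  "example_fdb_dump" is a hypothetical function.
 *
 *	static int example_fdb_dump(struct sk_buff *skb,
 *				    struct netlink_callback *cb,
 *				    struct net_device *dev,
 *				    struct net_device *filter_dev, int *idx)
 *	{
 *		return ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx);
 *	}
 */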
3284 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
3286 struct net_device *dev;
3287 struct nlattr *tb[IFLA_MAX+1];
3288 struct net_device *br_dev = NULL;
3289 const struct net_device_ops *ops = NULL;
3290 const struct net_device_ops *cops = NULL;
3291 struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
3292 struct net *net = sock_net(skb->sk);
3293 struct hlist_head *head;
3301 err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
3302 IFLA_MAX, ifla_policy, NULL);
3305 } else if (err == 0) {
3306 if (tb[IFLA_MASTER])
3307 br_idx = nla_get_u32(tb[IFLA_MASTER]);
3310 brport_idx = ifm->ifi_index;
3313 br_dev = __dev_get_by_index(net, br_idx);
3317 ops = br_dev->netdev_ops;
3321 s_idx = cb->args[1];
3323 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3325 head = &net->dev_index_head[h];
3326 hlist_for_each_entry(dev, head, index_hlist) {
3328 if (brport_idx && (dev->ifindex != brport_idx))
3331 if (!br_idx) { /* user did not specify a specific bridge */
3332 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3333 br_dev = netdev_master_upper_dev_get(dev);
3334 cops = br_dev->netdev_ops;
3337 if (dev != br_dev &&
3338 !(dev->priv_flags & IFF_BRIDGE_PORT))
3341 if (br_dev != netdev_master_upper_dev_get(dev) &&
3342 !(dev->priv_flags & IFF_EBRIDGE))
3350 if (dev->priv_flags & IFF_BRIDGE_PORT) {
3351 if (cops && cops->ndo_fdb_dump) {
3352 err = cops->ndo_fdb_dump(skb, cb,
3355 if (err == -EMSGSIZE)
3360 if (dev->netdev_ops->ndo_fdb_dump)
3361 err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
3365 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
3367 if (err == -EMSGSIZE)
3372 /* reset the fdb offset to 0 for the rest of the interfaces */
3388 static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
3389 unsigned int attrnum, unsigned int flag)
3392 return nla_put_u8(skb, attrnum, !!(flags & flag));
3396 int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3397 struct net_device *dev, u16 mode,
3398 u32 flags, u32 mask, int nlflags,
3400 int (*vlan_fill)(struct sk_buff *skb,
3401 struct net_device *dev,
3404 struct nlmsghdr *nlh;
3405 struct ifinfomsg *ifm;
3406 struct nlattr *br_afspec;
3407 struct nlattr *protinfo;
3408 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
3409 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3412 nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
3416 ifm = nlmsg_data(nlh);
3417 ifm->ifi_family = AF_BRIDGE;
3419 ifm->ifi_type = dev->type;
3420 ifm->ifi_index = dev->ifindex;
3421 ifm->ifi_flags = dev_get_flags(dev);
3422 ifm->ifi_change = 0;
3425 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
3426 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
3427 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
3429 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
3431 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
3432 (dev->ifindex != dev_get_iflink(dev) &&
3433 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
3434 goto nla_put_failure;
3436 br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
3438 goto nla_put_failure;
3440 if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
3441 nla_nest_cancel(skb, br_afspec);
3442 goto nla_put_failure;
3445 if (mode != BRIDGE_MODE_UNDEF) {
3446 if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
3447 nla_nest_cancel(skb, br_afspec);
3448 goto nla_put_failure;
3452 err = vlan_fill(skb, dev, filter_mask);
3454 nla_nest_cancel(skb, br_afspec);
3455 goto nla_put_failure;
3458 nla_nest_end(skb, br_afspec);
3460 protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
3462 goto nla_put_failure;
3464 if (brport_nla_put_flag(skb, flags, mask,
3465 IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
3466 brport_nla_put_flag(skb, flags, mask,
3467 IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
3468 brport_nla_put_flag(skb, flags, mask,
3469 IFLA_BRPORT_FAST_LEAVE,
3470 BR_MULTICAST_FAST_LEAVE) ||
3471 brport_nla_put_flag(skb, flags, mask,
3472 IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
3473 brport_nla_put_flag(skb, flags, mask,
3474 IFLA_BRPORT_LEARNING, BR_LEARNING) ||
3475 brport_nla_put_flag(skb, flags, mask,
3476 IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
3477 brport_nla_put_flag(skb, flags, mask,
3478 IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
3479 brport_nla_put_flag(skb, flags, mask,
3480 IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
3481 nla_nest_cancel(skb, protinfo);
3482 goto nla_put_failure;
3485 nla_nest_end(skb, protinfo);
3487 nlmsg_end(skb, nlh);
3490 nlmsg_cancel(skb, nlh);
3491 return err ? err : -EMSGSIZE;
3493 EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
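/*
 * Illustrative sketch (not part of the original file): a NIC that embeds a
 * simple VEB/VEPA bridge typically implements ndo_bridge_getlink() as a thin
 * wrapper around the default helper above.  "example_bridge_getlink" is a
 * hypothetical function and BRIDGE_MODE_VEB an example mode.
 *
 *	static int example_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 *					  struct net_device *dev,
 *					  u32 filter_mask, int nlflags)
 *	{
 *		return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
 *					       BRIDGE_MODE_VEB, 0, 0, nlflags,
 *					       filter_mask, NULL);
 *	}
 */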
3495 static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3497 struct net *net = sock_net(skb->sk);
3498 struct net_device *dev;
3500 u32 portid = NETLINK_CB(cb->skb).portid;
3501 u32 seq = cb->nlh->nlmsg_seq;
3502 u32 filter_mask = 0;
3505 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
3506 struct nlattr *extfilt;
3508 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
3511 if (nla_len(extfilt) < sizeof(filter_mask))
3514 filter_mask = nla_get_u32(extfilt);
3519 for_each_netdev_rcu(net, dev) {
3520 const struct net_device_ops *ops = dev->netdev_ops;
3521 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3523 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
3524 if (idx >= cb->args[0]) {
3525 err = br_dev->netdev_ops->ndo_bridge_getlink(
3526 skb, portid, seq, dev,
3527 filter_mask, NLM_F_MULTI);
3528 if (err < 0 && err != -EOPNOTSUPP) {
3529 if (likely(skb->len))
3538 if (ops->ndo_bridge_getlink) {
3539 if (idx >= cb->args[0]) {
3540 err = ops->ndo_bridge_getlink(skb, portid,
3544 if (err < 0 && err != -EOPNOTSUPP) {
3545 if (likely(skb->len))
3562 static inline size_t bridge_nlmsg_size(void)
3564 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
3565 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
3566 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
3567 + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
3568 + nla_total_size(sizeof(u32)) /* IFLA_MTU */
3569 + nla_total_size(sizeof(u32)) /* IFLA_LINK */
3570 + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
3571 + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
3572 + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
3573 + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
3574 + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
3577 static int rtnl_bridge_notify(struct net_device *dev)
3579 struct net *net = dev_net(dev);
3580 struct sk_buff *skb;
3581 int err = -EOPNOTSUPP;
3583 if (!dev->netdev_ops->ndo_bridge_getlink)
3586 skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
3592 err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
3599 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
3602 WARN_ON(err == -EMSGSIZE);
3605 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
3609 static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
3610 struct netlink_ext_ack *extack)
3612 struct net *net = sock_net(skb->sk);
3613 struct ifinfomsg *ifm;
3614 struct net_device *dev;
3615 struct nlattr *br_spec, *attr = NULL;
3616 int rem, err = -EOPNOTSUPP;
3618 bool have_flags = false;
3620 if (nlmsg_len(nlh) < sizeof(*ifm))
3623 ifm = nlmsg_data(nlh);
3624 if (ifm->ifi_family != AF_BRIDGE)
3625 return -EPFNOSUPPORT;
3627 dev = __dev_get_by_index(net, ifm->ifi_index);
3629 pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
3633 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3635 nla_for_each_nested(attr, br_spec, rem) {
3636 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
3637 if (nla_len(attr) < sizeof(flags))
3641 flags = nla_get_u16(attr);
3647 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
3648 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3650 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
3655 err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
3659 flags &= ~BRIDGE_FLAGS_MASTER;
3662 if ((flags & BRIDGE_FLAGS_SELF)) {
3663 if (!dev->netdev_ops->ndo_bridge_setlink)
3666 err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
3669 flags &= ~BRIDGE_FLAGS_SELF;
3671 /* Generate an event to notify the upper layer of the bridge change */
3674 err = rtnl_bridge_notify(dev);
3679 memcpy(nla_data(attr), &flags, sizeof(flags));
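/*
 * Illustrative sketch (not part of the original file): the bridge setlink
 * request parsed above carries its flags inside a nested IFLA_AF_SPEC
 * attribute of an AF_BRIDGE ifinfomsg; built with libmnl it looks roughly
 * like this ("port_ifindex" is a placeholder, error handling omitted).
 *
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	struct ifinfomsg *ifm;
 *	struct nlattr *af_spec;
 *
 *	nlh->nlmsg_type  = RTM_SETLINK;
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
 *	ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
 *	ifm->ifi_family = AF_BRIDGE;
 *	ifm->ifi_index  = port_ifindex;
 *	af_spec = mnl_attr_nest_start(nlh, IFLA_AF_SPEC);
 *	mnl_attr_put_u16(nlh, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF);
 *	mnl_attr_nest_end(nlh, af_spec);
 */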
3684 static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
3685 struct netlink_ext_ack *extack)
3687 struct net *net = sock_net(skb->sk);
3688 struct ifinfomsg *ifm;
3689 struct net_device *dev;
3690 struct nlattr *br_spec, *attr = NULL;
3691 int rem, err = -EOPNOTSUPP;
3693 bool have_flags = false;
3695 if (nlmsg_len(nlh) < sizeof(*ifm))
3698 ifm = nlmsg_data(nlh);
3699 if (ifm->ifi_family != AF_BRIDGE)
3700 return -EPFNOSUPPORT;
3702 dev = __dev_get_by_index(net, ifm->ifi_index);
3704 pr_info("PF_BRIDGE: RTM_DELLINK with unknown ifindex\n");
3708 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3710 nla_for_each_nested(attr, br_spec, rem) {
3711 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
3712 if (nla_len(attr) < sizeof(flags))
3716 flags = nla_get_u16(attr);
3722 if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
3723 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3725 if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
3730 err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
3734 flags &= ~BRIDGE_FLAGS_MASTER;
3737 if ((flags & BRIDGE_FLAGS_SELF)) {
3738 if (!dev->netdev_ops->ndo_bridge_dellink)
3741 err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
3745 flags &= ~BRIDGE_FLAGS_SELF;
3747 /* Generate an event to notify the upper layer of the bridge change */
3750 err = rtnl_bridge_notify(dev);
3755 memcpy(nla_data(attr), &flags, sizeof(flags));
3760 static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
3762 return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
3763 (!idxattr || idxattr == attrid);
3766 #define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
3767 static int rtnl_get_offload_stats_attr_size(int attr_id)
3770 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
3771 return sizeof(struct rtnl_link_stats64);
3777 static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
3780 struct nlattr *attr = NULL;
3785 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
3786 dev->netdev_ops->ndo_get_offload_stats))
3789 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
3790 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
3791 if (attr_id < *prividx)
3794 size = rtnl_get_offload_stats_attr_size(attr_id);
3798 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
3801 attr = nla_reserve_64bit(skb, attr_id, size,
3802 IFLA_OFFLOAD_XSTATS_UNSPEC);
3804 goto nla_put_failure;
3806 attr_data = nla_data(attr);
3807 memset(attr_data, 0, size);
3808 err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
3811 goto get_offload_stats_failure;
3822 get_offload_stats_failure:
3827 static int rtnl_get_offload_stats_size(const struct net_device *dev)
3833 if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
3834 dev->netdev_ops->ndo_get_offload_stats))
3837 for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
3838 attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
3839 if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
3841 size = rtnl_get_offload_stats_attr_size(attr_id);
3842 nla_size += nla_total_size_64bit(size);
3846 nla_size += nla_total_size(0);
3851 static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
3852 int type, u32 pid, u32 seq, u32 change,
3853 unsigned int flags, unsigned int filter_mask,
3854 int *idxattr, int *prividx)
3856 struct if_stats_msg *ifsm;
3857 struct nlmsghdr *nlh;
3858 struct nlattr *attr;
3859 int s_prividx = *prividx;
3864 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
3868 ifsm = nlmsg_data(nlh);
3869 ifsm->ifindex = dev->ifindex;
3870 ifsm->filter_mask = filter_mask;
3872 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
3873 struct rtnl_link_stats64 *sp;
3875 attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
3876 sizeof(struct rtnl_link_stats64),
3879 goto nla_put_failure;
3881 sp = nla_data(attr);
3882 dev_get_stats(dev, sp);
3885 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
3886 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
3888 if (ops && ops->fill_linkxstats) {
3889 *idxattr = IFLA_STATS_LINK_XSTATS;
3890 attr = nla_nest_start(skb,
3891 IFLA_STATS_LINK_XSTATS);
3893 goto nla_put_failure;
3895 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
3896 nla_nest_end(skb, attr);
3898 goto nla_put_failure;
3903 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
3905 const struct rtnl_link_ops *ops = NULL;
3906 const struct net_device *master;
3908 master = netdev_master_upper_dev_get(dev);
3910 ops = master->rtnl_link_ops;
3911 if (ops && ops->fill_linkxstats) {
3912 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
3913 attr = nla_nest_start(skb,
3914 IFLA_STATS_LINK_XSTATS_SLAVE);
3916 goto nla_put_failure;
3918 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
3919 nla_nest_end(skb, attr);
3921 goto nla_put_failure;
3926 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
3928 *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
3929 attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
3931 goto nla_put_failure;
3933 err = rtnl_get_offload_stats(skb, dev, prividx);
3934 if (err == -ENODATA)
3935 nla_nest_cancel(skb, attr);
3937 nla_nest_end(skb, attr);
3939 if (err && err != -ENODATA)
3940 goto nla_put_failure;
3944 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
3945 struct rtnl_af_ops *af_ops;
3947 *idxattr = IFLA_STATS_AF_SPEC;
3948 attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
3950 goto nla_put_failure;
3952 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
3953 if (af_ops->fill_stats_af) {
3957 af = nla_nest_start(skb, af_ops->family);
3959 goto nla_put_failure;
3961 err = af_ops->fill_stats_af(skb, dev);
3963 if (err == -ENODATA)
3964 nla_nest_cancel(skb, af);
3966 goto nla_put_failure;
3968 nla_nest_end(skb, af);
3972 nla_nest_end(skb, attr);
3977 nlmsg_end(skb, nlh);
3982 /* not a multi-part message, or no progress made, means a real error */
3983 if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
3984 nlmsg_cancel(skb, nlh);
3986 nlmsg_end(skb, nlh);
3991 static size_t if_nlmsg_stats_size(const struct net_device *dev,
3996 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
3997 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
3999 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
4000 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
4001 int attr = IFLA_STATS_LINK_XSTATS;
4003 if (ops && ops->get_linkxstats_size) {
4004 size += nla_total_size(ops->get_linkxstats_size(dev,
4006 /* for IFLA_STATS_LINK_XSTATS */
4007 size += nla_total_size(0);
4011 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
4012 struct net_device *_dev = (struct net_device *)dev;
4013 const struct rtnl_link_ops *ops = NULL;
4014 const struct net_device *master;
4016 /* netdev_master_upper_dev_get can't take const */
4017 master = netdev_master_upper_dev_get(_dev);
4019 ops = master->rtnl_link_ops;
4020 if (ops && ops->get_linkxstats_size) {
4021 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
4023 size += nla_total_size(ops->get_linkxstats_size(dev,
4025 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
4026 size += nla_total_size(0);
4030 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
4031 size += rtnl_get_offload_stats_size(dev);
4033 if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
4034 struct rtnl_af_ops *af_ops;
4036 /* for IFLA_STATS_AF_SPEC */
4037 size += nla_total_size(0);
4039 list_for_each_entry(af_ops, &rtnl_af_ops, list) {
4040 if (af_ops->get_stats_af_size) {
4041 size += nla_total_size(
4042 af_ops->get_stats_af_size(dev));
4045 size += nla_total_size(0);
4053 static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
4054 struct netlink_ext_ack *extack)
4056 struct net *net = sock_net(skb->sk);
4057 struct net_device *dev = NULL;
4058 int idxattr = 0, prividx = 0;
4059 struct if_stats_msg *ifsm;
4060 struct sk_buff *nskb;
4064 if (nlmsg_len(nlh) < sizeof(*ifsm))
4067 ifsm = nlmsg_data(nlh);
4068 if (ifsm->ifindex > 0)
4069 dev = __dev_get_by_index(net, ifsm->ifindex);
4076 filter_mask = ifsm->filter_mask;
4080 nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
4084 err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
4085 NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
4086 0, filter_mask, &idxattr, &prividx);
4088 /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
4089 WARN_ON(err == -EMSGSIZE);
4092 err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
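/*
 * Illustrative sketch (not part of the original file): unlike the other link
 * requests, RTM_GETSTATS carries a struct if_stats_msg; a minimal query for
 * the 64-bit stats of a single device looks roughly like this
 * ("target_ifindex" is a placeholder).
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct if_stats_msg ifsm;
 *	} req = {
 *		.nlh.nlmsg_len    = NLMSG_LENGTH(sizeof(struct if_stats_msg)),
 *		.nlh.nlmsg_type   = RTM_GETSTATS,
 *		.nlh.nlmsg_flags  = NLM_F_REQUEST,
 *		.ifsm.ifindex     = target_ifindex,
 *		.ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
 *	};
 */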
4098 static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
4100 int h, s_h, err, s_idx, s_idxattr, s_prividx;
4101 struct net *net = sock_net(skb->sk);
4102 unsigned int flags = NLM_F_MULTI;
4103 struct if_stats_msg *ifsm;
4104 struct hlist_head *head;
4105 struct net_device *dev;
4106 u32 filter_mask = 0;
4110 s_idx = cb->args[1];
4111 s_idxattr = cb->args[2];
4112 s_prividx = cb->args[3];
4114 cb->seq = net->dev_base_seq;
4116 if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
4119 ifsm = nlmsg_data(cb->nlh);
4120 filter_mask = ifsm->filter_mask;
4124 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4126 head = &net->dev_index_head[h];
4127 hlist_for_each_entry(dev, head, index_hlist) {
4130 err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
4131 NETLINK_CB(cb->skb).portid,
4132 cb->nlh->nlmsg_seq, 0,
4134 &s_idxattr, &s_prividx);
4135 /* If we ran out of room on the first message, we are in trouble. */
4138 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
4144 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4150 cb->args[3] = s_prividx;
4151 cb->args[2] = s_idxattr;
4158 /* Process one rtnetlink message. */
4160 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
4161 struct netlink_ext_ack *extack)
4163 struct net *net = sock_net(skb->sk);
4164 rtnl_doit_func doit;
4170 type = nlh->nlmsg_type;
4176 /* All the messages must have at least 1 byte length */
4177 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
4180 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
/* Only GET-type requests (kind == 2) may be issued without CAP_NET_ADMIN */
4183 if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
4186 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
4188 rtnl_dumpit_func dumpit;
4189 rtnl_calcit_func calcit;
4190 u16 min_dump_alloc = 0;
4192 dumpit = rtnl_get_dumpit(family, type);
4195 calcit = rtnl_get_calcit(family, type);
4197 min_dump_alloc = calcit(skb, nlh);
4202 struct netlink_dump_control c = {
4204 .min_dump_alloc = min_dump_alloc,
4206 err = netlink_dump_start(rtnl, skb, nlh, &c);
4212 doit = rtnl_get_doit(family, type);
4216 return doit(skb, nlh, extack);
4219 static void rtnetlink_rcv(struct sk_buff *skb)
4222 netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
4226 static int rtnetlink_bind(struct net *net, int group)
4229 case RTNLGRP_IPV4_MROUTE_R:
4230 case RTNLGRP_IPV6_MROUTE_R:
4231 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4238 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
4240 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4244 case NETDEV_CHANGENAME:
4245 case NETDEV_FEAT_CHANGE:
4246 case NETDEV_BONDING_FAILOVER:
4247 case NETDEV_NOTIFY_PEERS:
4248 case NETDEV_RESEND_IGMP:
4249 case NETDEV_CHANGEINFODATA:
4250 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
4259 static struct notifier_block rtnetlink_dev_notifier = {
4260 .notifier_call = rtnetlink_event,
4264 static int __net_init rtnetlink_net_init(struct net *net)
4267 struct netlink_kernel_cfg cfg = {
4268 .groups = RTNLGRP_MAX,
4269 .input = rtnetlink_rcv,
4270 .cb_mutex = &rtnl_mutex,
4271 .flags = NL_CFG_F_NONROOT_RECV,
4272 .bind = rtnetlink_bind,
4275 sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
4282 static void __net_exit rtnetlink_net_exit(struct net *net)
4284 netlink_kernel_release(net->rtnl);
4288 static struct pernet_operations rtnetlink_net_ops = {
4289 .init = rtnetlink_net_init,
4290 .exit = rtnetlink_net_exit,
4293 void __init rtnetlink_init(void)
4295 if (register_pernet_subsys(&rtnetlink_net_ops))
4296 panic("rtnetlink_init: cannot initialize rtnetlink\n");
4298 register_netdevice_notifier(&rtnetlink_dev_notifier);
4300 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
4301 rtnl_dump_ifinfo, rtnl_calcit);
4302 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL);
4303 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL);
4304 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL);
4306 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
4307 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
4308 rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, NULL);
4310 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
4311 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
4312 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
4314 rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
4315 rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
4316 rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
4318 rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump, NULL);
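/*
 * Illustrative sketch (not part of the original file): per-family protocol
 * code plugs into the same handler table; e.g. IPv4 address handling in
 * net/ipv4/devinet.c registers roughly as follows.
 *
 *	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
 *	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
 *	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
 */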