2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function call per packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close changes.
51 * Rudi Cilibrasi : Pass the right thing to set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/stat.h>
102 #include <net/pkt_sched.h>
103 #include <net/checksum.h>
104 #include <net/xfrm.h>
105 #include <linux/highmem.h>
106 #include <linux/init.h>
107 #include <linux/module.h>
108 #include <linux/netpoll.h>
109 #include <linux/rcupdate.h>
110 #include <linux/delay.h>
111 #include <net/iw_handler.h>
112 #include <asm/current.h>
113 #include <linux/audit.h>
114 #include <linux/dmaengine.h>
115 #include <linux/err.h>
116 #include <linux/ctype.h>
117 #include <linux/if_arp.h>
118 #include <linux/if_vlan.h>
119 #include <linux/ip.h>
121 #include <net/mpls.h>
122 #include <linux/ipv6.h>
123 #include <linux/in.h>
124 #include <linux/jhash.h>
125 #include <linux/random.h>
126 #include <trace/events/napi.h>
127 #include <trace/events/net.h>
128 #include <trace/events/skb.h>
129 #include <linux/pci.h>
130 #include <linux/inetdevice.h>
131 #include <linux/cpu_rmap.h>
132 #include <linux/static_key.h>
133 #include <linux/hashtable.h>
134 #include <linux/vmalloc.h>
135 #include <linux/if_macvlan.h>
136 #include <linux/errqueue.h>
137 #include <linux/hrtimer.h>
138 #include <linux/netfilter_ingress.h>
140 #include "net-sysfs.h"
142 /* Instead of increasing this, you should create a hash table. */
143 #define MAX_GRO_SKBS 8
145 /* This should be increased if a protocol with a bigger head is added. */
146 #define GRO_MAX_HEAD (MAX_HEADER + 128)
148 static DEFINE_SPINLOCK(ptype_lock);
149 static DEFINE_SPINLOCK(offload_lock);
150 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
151 struct list_head ptype_all __read_mostly; /* Taps */
152 static struct list_head offload_base __read_mostly;
154 static int netif_rx_internal(struct sk_buff *skb);
155 static int call_netdevice_notifiers_info(unsigned long val,
156 struct net_device *dev,
157 struct netdev_notifier_info *info);
160 * The @dev_base_head list is protected by @dev_base_lock and the rtnl semaphore.
163 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
165 * Writers must hold the rtnl semaphore while they loop through the
166 * dev_base_head list, and hold dev_base_lock for writing when they do the
167 * actual updates. This allows pure readers to access the list even
168 * while a writer is preparing to update it.
170 * To put it another way, dev_base_lock is held for writing only to
171 * protect against pure readers; the rtnl semaphore provides the
172 * protection against other writers.
174 * See, for example usages, register_netdevice() and
175 * unregister_netdevice(), which must be called with the rtnl semaphore held.
178 DEFINE_RWLOCK(dev_base_lock);
179 EXPORT_SYMBOL(dev_base_lock);
181 /* protects napi_hash addition/deletion and napi_gen_id */
182 static DEFINE_SPINLOCK(napi_hash_lock);
184 static unsigned int napi_gen_id;
185 static DEFINE_HASHTABLE(napi_hash, 8);
187 static seqcount_t devnet_rename_seq;
189 static inline void dev_base_seq_inc(struct net *net)
191 while (++net->dev_base_seq == 0);
194 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
196 unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
198 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
201 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
203 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
206 static inline void rps_lock(struct softnet_data *sd)
209 spin_lock(&sd->input_pkt_queue.lock);
213 static inline void rps_unlock(struct softnet_data *sd)
216 spin_unlock(&sd->input_pkt_queue.lock);
220 /* Device list insertion */
221 static void list_netdevice(struct net_device *dev)
223 struct net *net = dev_net(dev);
227 write_lock_bh(&dev_base_lock);
228 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
229 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
230 hlist_add_head_rcu(&dev->index_hlist,
231 dev_index_hash(net, dev->ifindex));
232 write_unlock_bh(&dev_base_lock);
234 dev_base_seq_inc(net);
237 /* Device list removal
238 * caller must respect a RCU grace period before freeing/reusing dev
240 static void unlist_netdevice(struct net_device *dev)
244 /* Unlink dev from the device chain */
245 write_lock_bh(&dev_base_lock);
246 list_del_rcu(&dev->dev_list);
247 hlist_del_rcu(&dev->name_hlist);
248 hlist_del_rcu(&dev->index_hlist);
249 write_unlock_bh(&dev_base_lock);
251 dev_base_seq_inc(dev_net(dev));
258 static RAW_NOTIFIER_HEAD(netdev_chain);
261 * Device drivers call our routines to queue packets here. We empty the
262 * queue in the local softnet handler.
265 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
266 EXPORT_PER_CPU_SYMBOL(softnet_data);
268 #ifdef CONFIG_LOCKDEP
270 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
271 * according to dev->type
273 static const unsigned short netdev_lock_type[] =
274 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
275 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
276 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
277 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
278 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
279 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
280 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
281 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
282 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
283 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
284 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
285 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
286 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
287 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
288 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
290 static const char *const netdev_lock_name[] =
291 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
292 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
293 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
294 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
295 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
296 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
297 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
298 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
299 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
300 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
301 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
302 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
303 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
304 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
305 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
307 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
308 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
310 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
314 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
315 if (netdev_lock_type[i] == dev_type)
317 /* the last key is used by default */
318 return ARRAY_SIZE(netdev_lock_type) - 1;
321 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
322 unsigned short dev_type)
326 i = netdev_lock_pos(dev_type);
327 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
328 netdev_lock_name[i]);
331 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
335 i = netdev_lock_pos(dev->type);
336 lockdep_set_class_and_name(&dev->addr_list_lock,
337 &netdev_addr_lock_key[i],
338 netdev_lock_name[i]);
341 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
342 unsigned short dev_type)
345 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
350 /*******************************************************************************
352 Protocol management and registration routines
354 *******************************************************************************/
357 * Add a protocol ID to the list. Now that the input handler is
358 * smarter we can dispense with all the messy stuff that used to be here.
361 * BEWARE!!! Protocol handlers that mangle input packets
362 * MUST BE last in the hash buckets, and checking of protocol handlers
363 * MUST start from the promiscuous ptype_all chain in net_bh.
364 * It is true now, do not change it.
365 * Explanation follows: if a protocol handler that mangles packets were
366 * first on the list, it could not tell that the packet is cloned and
367 * should be copied-on-write; it would change the shared copy and
368 * subsequent readers would get a broken packet.
372 static inline struct list_head *ptype_head(const struct packet_type *pt)
374 if (pt->type == htons(ETH_P_ALL))
375 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
377 return pt->dev ? &pt->dev->ptype_specific :
378 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
382 * dev_add_pack - add packet handler
383 * @pt: packet type declaration
385 * Add a protocol handler to the networking stack. The passed &packet_type
386 * is linked into kernel lists and may not be freed until it has been
387 * removed from the kernel lists.
389 * This call does not sleep, therefore it cannot
390 * guarantee that all CPUs that are in the middle of receiving packets
391 * will see the new packet type (until the next received packet).
394 void dev_add_pack(struct packet_type *pt)
396 struct list_head *head = ptype_head(pt);
398 spin_lock(&ptype_lock);
399 list_add_rcu(&pt->list, head);
400 spin_unlock(&ptype_lock);
402 EXPORT_SYMBOL(dev_add_pack);
405 * __dev_remove_pack - remove packet handler
406 * @pt: packet type declaration
408 * Remove a protocol handler that was previously added to the kernel
409 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
410 * from the kernel lists and can be freed or reused once this function returns.
413 * The packet type might still be in use by receivers
414 * and must not be freed until after all the CPUs have gone
415 * through a quiescent state.
417 void __dev_remove_pack(struct packet_type *pt)
419 struct list_head *head = ptype_head(pt);
420 struct packet_type *pt1;
422 spin_lock(&ptype_lock);
424 list_for_each_entry(pt1, head, list) {
426 list_del_rcu(&pt->list);
431 pr_warn("dev_remove_pack: %p not found\n", pt);
433 spin_unlock(&ptype_lock);
435 EXPORT_SYMBOL(__dev_remove_pack);
438 * dev_remove_pack - remove packet handler
439 * @pt: packet type declaration
441 * Remove a protocol handler that was previously added to the kernel
442 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
443 * from the kernel lists and can be freed or reused once this function returns.
446 * This call sleeps to guarantee that no CPU is looking at the packet type after return.
449 void dev_remove_pack(struct packet_type *pt)
451 __dev_remove_pack(pt);
455 EXPORT_SYMBOL(dev_remove_pack);
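/* Illustrative sketch (not part of this file): a minimal tap that registers
 * with dev_add_pack() and unregisters with dev_remove_pack().  The handler
 * and variable names below are invented for the example.
 *
 *	static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *				   struct packet_type *pt,
 *				   struct net_device *orig_dev)
 *	{
 *		// ETH_P_ALL taps get a shared clone; do not modify it
 *		consume_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_tap __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_ALL),	// see ptype_head()
 *		.func	= example_tap_rcv,
 *	};
 *
 *	// on init:  dev_add_pack(&example_tap);
 *	// on exit:  dev_remove_pack(&example_tap);	// may sleep
 */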
459 * dev_add_offload - register offload handlers
460 * @po: protocol offload declaration
462 * Add protocol offload handlers to the networking stack. The passed
463 * &proto_offload is linked into kernel lists and may not be freed until
464 * it has been removed from the kernel lists.
466 * This call does not sleep, therefore it cannot
467 * guarantee that all CPUs that are in the middle of receiving packets
468 * will see the new offload handlers (until the next received packet).
470 void dev_add_offload(struct packet_offload *po)
472 struct list_head *head = &offload_base;
474 spin_lock(&offload_lock);
475 list_add_rcu(&po->list, head);
476 spin_unlock(&offload_lock);
478 EXPORT_SYMBOL(dev_add_offload);
481 * __dev_remove_offload - remove offload handler
482 * @po: packet offload declaration
484 * Remove a protocol offload handler that was previously added to the
485 * kernel offload handlers by dev_add_offload(). The passed &offload_type
486 * is removed from the kernel lists and can be freed or reused once this function returns.
489 * The packet type might still be in use by receivers
490 * and must not be freed until after all the CPUs have gone
491 * through a quiescent state.
493 static void __dev_remove_offload(struct packet_offload *po)
495 struct list_head *head = &offload_base;
496 struct packet_offload *po1;
498 spin_lock(&offload_lock);
500 list_for_each_entry(po1, head, list) {
502 list_del_rcu(&po->list);
507 pr_warn("dev_remove_offload: %p not found\n", po);
509 spin_unlock(&offload_lock);
513 * dev_remove_offload - remove packet offload handler
514 * @po: packet offload declaration
516 * Remove a packet offload handler that was previously added to the kernel
517 * offload handlers by dev_add_offload(). The passed &offload_type is
518 * removed from the kernel lists and can be freed or reused once this function returns.
521 * This call sleeps to guarantee that no CPU is looking at the packet offload handler after return.
524 void dev_remove_offload(struct packet_offload *po)
526 __dev_remove_offload(po);
530 EXPORT_SYMBOL(dev_remove_offload);
532 /******************************************************************************
534 Device Boot-time Settings Routines
536 *******************************************************************************/
538 /* Boot time configuration table */
539 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
542 * netdev_boot_setup_add - add new setup entry
543 * @name: name of the device
544 * @map: configured settings for the device
546 * Adds new setup entry to the dev_boot_setup list. The function
547 * returns 0 on error and 1 on success. This is a generic routine for all netdevices.
550 static int netdev_boot_setup_add(char *name, struct ifmap *map)
552 struct netdev_boot_setup *s;
556 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
557 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
558 memset(s[i].name, 0, sizeof(s[i].name));
559 strlcpy(s[i].name, name, IFNAMSIZ);
560 memcpy(&s[i].map, map, sizeof(s[i].map));
565 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
569 * netdev_boot_setup_check - check boot time settings
570 * @dev: the netdevice
572 * Check boot time settings for the device.
573 * The found settings are set for the device to be used
574 * later in the device probing.
575 * Returns 0 if no settings found, 1 if they are.
577 int netdev_boot_setup_check(struct net_device *dev)
579 struct netdev_boot_setup *s = dev_boot_setup;
582 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
583 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
584 !strcmp(dev->name, s[i].name)) {
585 dev->irq = s[i].map.irq;
586 dev->base_addr = s[i].map.base_addr;
587 dev->mem_start = s[i].map.mem_start;
588 dev->mem_end = s[i].map.mem_end;
594 EXPORT_SYMBOL(netdev_boot_setup_check);
598 * netdev_boot_base - get address from boot time settings
599 * @prefix: prefix for network device
600 * @unit: id for network device
602 * Check boot time settings for the base address of device.
603 * The found settings are set for the device to be used
604 * later in the device probing.
605 * Returns 0 if no settings found.
607 unsigned long netdev_boot_base(const char *prefix, int unit)
609 const struct netdev_boot_setup *s = dev_boot_setup;
613 sprintf(name, "%s%d", prefix, unit);
616 * If device already registered then return base of 1
617 * to indicate not to probe for this interface
619 if (__dev_get_by_name(&init_net, name))
622 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
623 if (!strcmp(name, s[i].name))
624 return s[i].map.base_addr;
629 * Saves at boot time configured settings for any netdevice.
631 int __init netdev_boot_setup(char *str)
636 str = get_options(str, ARRAY_SIZE(ints), ints);
641 memset(&map, 0, sizeof(map));
645 map.base_addr = ints[2];
647 map.mem_start = ints[3];
649 map.mem_end = ints[4];
651 /* Add new entry to the list */
652 return netdev_boot_setup_add(str, &map);
655 __setup("netdev=", netdev_boot_setup);
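/* For illustration only (the values are arbitrary): with the parsing above,
 * a boot line such as
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * records irq 5 and I/O base 0x340 for the device that will be named "eth0",
 * to be picked up later by netdev_boot_setup_check() during probing.
 */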
657 /*******************************************************************************
659 Device Interface Subroutines
661 *******************************************************************************/
664 * dev_get_iflink - get 'iflink' value of an interface
665 * @dev: targeted interface
667 * Indicates the ifindex the interface is linked to.
668 * Physical interfaces have the same 'ifindex' and 'iflink' values.
671 int dev_get_iflink(const struct net_device *dev)
673 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
674 return dev->netdev_ops->ndo_get_iflink(dev);
676 /* If dev->rtnl_link_ops is set, it's a virtual interface. */
677 if (dev->rtnl_link_ops)
682 EXPORT_SYMBOL(dev_get_iflink);
685 * __dev_get_by_name - find a device by its name
686 * @net: the applicable net namespace
687 * @name: name to find
689 * Find an interface by name. Must be called under RTNL semaphore
690 * or @dev_base_lock. If the name is found a pointer to the device
691 * is returned. If the name is not found then %NULL is returned. The
692 * reference counters are not incremented so the caller must be
693 * careful with locks.
696 struct net_device *__dev_get_by_name(struct net *net, const char *name)
698 struct net_device *dev;
699 struct hlist_head *head = dev_name_hash(net, name);
701 hlist_for_each_entry(dev, head, name_hlist)
702 if (!strncmp(dev->name, name, IFNAMSIZ))
707 EXPORT_SYMBOL(__dev_get_by_name);
710 * dev_get_by_name_rcu - find a device by its name
711 * @net: the applicable net namespace
712 * @name: name to find
714 * Find an interface by name.
715 * If the name is found a pointer to the device is returned.
716 * If the name is not found then %NULL is returned.
717 * The reference counters are not incremented so the caller must be
718 * careful with locks. The caller must hold RCU lock.
721 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
723 struct net_device *dev;
724 struct hlist_head *head = dev_name_hash(net, name);
726 hlist_for_each_entry_rcu(dev, head, name_hlist)
727 if (!strncmp(dev->name, name, IFNAMSIZ))
732 EXPORT_SYMBOL(dev_get_by_name_rcu);
735 * dev_get_by_name - find a device by its name
736 * @net: the applicable net namespace
737 * @name: name to find
739 * Find an interface by name. This can be called from any
740 * context and does its own locking. The returned handle has
741 * the usage count incremented and the caller must use dev_put() to
742 * release it when it is no longer needed. %NULL is returned if no
743 * matching device is found.
746 struct net_device *dev_get_by_name(struct net *net, const char *name)
748 struct net_device *dev;
751 dev = dev_get_by_name_rcu(net, name);
757 EXPORT_SYMBOL(dev_get_by_name);
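/* Illustrative sketch (not part of this file) of the two lookup flavours;
 * the name "eth0" and the use of &init_net are arbitrary here.
 *
 *	// refcounted lookup, usable from any context:
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		// ... use dev ...
 *		dev_put(dev);
 *	}
 *
 *	// lock-free lookup, no reference taken:
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(&init_net, "eth0");
 *	if (dev) {
 *		// ... dev is only valid inside this RCU section ...
 *	}
 *	rcu_read_unlock();
 */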
760 * __dev_get_by_index - find a device by its ifindex
761 * @net: the applicable net namespace
762 * @ifindex: index of device
764 * Search for an interface by index. Returns %NULL if the device
765 * is not found or a pointer to the device. The device has not
766 * had its reference counter increased so the caller must be careful
767 * about locking. The caller must hold either the RTNL semaphore or @dev_base_lock.
771 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
773 struct net_device *dev;
774 struct hlist_head *head = dev_index_hash(net, ifindex);
776 hlist_for_each_entry(dev, head, index_hlist)
777 if (dev->ifindex == ifindex)
782 EXPORT_SYMBOL(__dev_get_by_index);
785 * dev_get_by_index_rcu - find a device by its ifindex
786 * @net: the applicable net namespace
787 * @ifindex: index of device
789 * Search for an interface by index. Returns %NULL if the device
790 * is not found or a pointer to the device. The device has not
791 * had its reference counter increased so the caller must be careful
792 * about locking. The caller must hold RCU lock.
795 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
797 struct net_device *dev;
798 struct hlist_head *head = dev_index_hash(net, ifindex);
800 hlist_for_each_entry_rcu(dev, head, index_hlist)
801 if (dev->ifindex == ifindex)
806 EXPORT_SYMBOL(dev_get_by_index_rcu);
810 * dev_get_by_index - find a device by its ifindex
811 * @net: the applicable net namespace
812 * @ifindex: index of device
814 * Search for an interface by index. Returns NULL if the device
815 * is not found or a pointer to the device. The device returned has
816 * had a reference added and the pointer is safe until the user calls
817 * dev_put to indicate they have finished with it.
820 struct net_device *dev_get_by_index(struct net *net, int ifindex)
822 struct net_device *dev;
825 dev = dev_get_by_index_rcu(net, ifindex);
831 EXPORT_SYMBOL(dev_get_by_index);
834 * netdev_get_name - get a netdevice name, knowing its ifindex.
835 * @net: network namespace
836 * @name: a pointer to the buffer where the name will be stored.
837 * @ifindex: the ifindex of the interface to get the name from.
839 * The use of raw_seqcount_begin() and cond_resched() before
840 * retrying is required as we want to give the writers a chance
841 * to complete when CONFIG_PREEMPT is not set.
843 int netdev_get_name(struct net *net, char *name, int ifindex)
845 struct net_device *dev;
849 seq = raw_seqcount_begin(&devnet_rename_seq);
851 dev = dev_get_by_index_rcu(net, ifindex);
857 strcpy(name, dev->name);
859 if (read_seqcount_retry(&devnet_rename_seq, seq)) {
868 * dev_getbyhwaddr_rcu - find a device by its hardware address
869 * @net: the applicable net namespace
870 * @type: media type of device
871 * @ha: hardware address
873 * Search for an interface by MAC address. Returns NULL if the device
874 * is not found or a pointer to the device.
875 * The caller must hold RCU or RTNL.
876 * The returned device has not had its ref count increased
877 * and the caller must therefore be careful about locking
881 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
884 struct net_device *dev;
886 for_each_netdev_rcu(net, dev)
887 if (dev->type == type &&
888 !memcmp(dev->dev_addr, ha, dev->addr_len))
893 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
895 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
897 struct net_device *dev;
900 for_each_netdev(net, dev)
901 if (dev->type == type)
906 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
908 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
910 struct net_device *dev, *ret = NULL;
913 for_each_netdev_rcu(net, dev)
914 if (dev->type == type) {
922 EXPORT_SYMBOL(dev_getfirstbyhwtype);
925 * __dev_get_by_flags - find any device with given flags
926 * @net: the applicable net namespace
927 * @if_flags: IFF_* values
928 * @mask: bitmask of bits in if_flags to check
930 * Search for any interface with the given flags. Returns NULL if a device
931 * is not found or a pointer to the device. Must be called inside
932 * rtnl_lock(), and result refcount is unchanged.
935 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
938 struct net_device *dev, *ret;
943 for_each_netdev(net, dev) {
944 if (((dev->flags ^ if_flags) & mask) == 0) {
951 EXPORT_SYMBOL(__dev_get_by_flags);
954 * dev_valid_name - check if name is okay for network device
957 * Network device names need to be valid file names
958 * to allow sysfs to work. We also disallow any kind of whitespace.
961 bool dev_valid_name(const char *name)
965 if (strlen(name) >= IFNAMSIZ)
967 if (!strcmp(name, ".") || !strcmp(name, ".."))
971 if (*name == '/' || *name == ':' || isspace(*name))
977 EXPORT_SYMBOL(dev_valid_name);
980 * __dev_alloc_name - allocate a name for a device
981 * @net: network namespace to allocate the device name in
982 * @name: name format string
983 * @buf: scratch buffer and result name string
985 * Passed a format string - eg "lt%d" - it will try and find a suitable
986 * id. It scans the list of devices to build up a free map, then chooses
987 * the first empty slot. The caller must hold the dev_base or rtnl lock
988 * while allocating the name and adding the device in order to avoid
990 * duplicates. Limited to bits_per_byte * page size devices (ie 32K on most platforms).
991 * Returns the number of the unit assigned or a negative errno code.
994 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
998 const int max_netdevices = 8*PAGE_SIZE;
999 unsigned long *inuse;
1000 struct net_device *d;
1002 p = strnchr(name, IFNAMSIZ-1, '%');
1005 * Verify the string as this thing may have come from
1006 * the user. There must be exactly one "%d" and no other "%" fields.
1009 if (p[1] != 'd' || strchr(p + 2, '%'))
1012 /* Use one page as a bit array of possible slots */
1013 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1017 for_each_netdev(net, d) {
1018 if (!sscanf(d->name, name, &i))
1020 if (i < 0 || i >= max_netdevices)
1023 /* avoid cases where sscanf is not exact inverse of printf */
1024 snprintf(buf, IFNAMSIZ, name, i);
1025 if (!strncmp(buf, d->name, IFNAMSIZ))
1029 i = find_first_zero_bit(inuse, max_netdevices);
1030 free_page((unsigned long) inuse);
1034 snprintf(buf, IFNAMSIZ, name, i);
1035 if (!__dev_get_by_name(net, buf))
1038 /* It is possible to run out of possible slots
1039 * when the name is long and there isn't enough space left
1040 * for the digits, or if all bits are used.
1046 * dev_alloc_name - allocate a name for a device
1048 * @name: name format string
1050 * Passed a format string - eg "lt%d" - it will try and find a suitable
1051 * id. It scans the list of devices to build up a free map, then chooses
1052 * the first empty slot. The caller must hold the dev_base or rtnl lock
1053 * while allocating the name and adding the device in order to avoid
1055 * duplicates. Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1056 * Returns the number of the unit assigned or a negative errno code.
1059 int dev_alloc_name(struct net_device *dev, const char *name)
1065 BUG_ON(!dev_net(dev));
1067 ret = __dev_alloc_name(net, name, buf);
1069 strlcpy(dev->name, buf, IFNAMSIZ);
1072 EXPORT_SYMBOL(dev_alloc_name);
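/* For example (a sketch, not part of this file), a caller holding RTNL could
 * ask for the next free "eth%d" slot before registering a device:
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		return err;	// negative errno, e.g. when no slot is free
 *	// dev->name now holds something like "eth3"
 */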
1074 static int dev_alloc_name_ns(struct net *net,
1075 struct net_device *dev,
1081 ret = __dev_alloc_name(net, name, buf);
1083 strlcpy(dev->name, buf, IFNAMSIZ);
1087 static int dev_get_valid_name(struct net *net,
1088 struct net_device *dev,
1093 if (!dev_valid_name(name))
1096 if (strchr(name, '%'))
1097 return dev_alloc_name_ns(net, dev, name);
1098 else if (__dev_get_by_name(net, name))
1100 else if (dev->name != name)
1101 strlcpy(dev->name, name, IFNAMSIZ);
1107 * dev_change_name - change name of a device
1109 * @newname: name (or format string) must be at least IFNAMSIZ
1111 * Change name of a device, can pass format strings "eth%d". Caller must hold RTNL.
1114 int dev_change_name(struct net_device *dev, const char *newname)
1116 unsigned char old_assign_type;
1117 char oldname[IFNAMSIZ];
1123 BUG_ON(!dev_net(dev));
1126 if (dev->flags & IFF_UP)
1129 write_seqcount_begin(&devnet_rename_seq);
1131 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1132 write_seqcount_end(&devnet_rename_seq);
1136 memcpy(oldname, dev->name, IFNAMSIZ);
1138 err = dev_get_valid_name(net, dev, newname);
1140 write_seqcount_end(&devnet_rename_seq);
1144 if (oldname[0] && !strchr(oldname, '%'))
1145 netdev_info(dev, "renamed from %s\n", oldname);
1147 old_assign_type = dev->name_assign_type;
1148 dev->name_assign_type = NET_NAME_RENAMED;
1151 ret = device_rename(&dev->dev, dev->name);
1153 memcpy(dev->name, oldname, IFNAMSIZ);
1154 dev->name_assign_type = old_assign_type;
1155 write_seqcount_end(&devnet_rename_seq);
1159 write_seqcount_end(&devnet_rename_seq);
1161 netdev_adjacent_rename_links(dev, oldname);
1163 write_lock_bh(&dev_base_lock);
1164 hlist_del_rcu(&dev->name_hlist);
1165 write_unlock_bh(&dev_base_lock);
1169 write_lock_bh(&dev_base_lock);
1170 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1171 write_unlock_bh(&dev_base_lock);
1173 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1174 ret = notifier_to_errno(ret);
1177 /* err >= 0 after dev_alloc_name() or stores the first errno */
1180 write_seqcount_begin(&devnet_rename_seq);
1181 memcpy(dev->name, oldname, IFNAMSIZ);
1182 memcpy(oldname, newname, IFNAMSIZ);
1183 dev->name_assign_type = old_assign_type;
1184 old_assign_type = NET_NAME_RENAMED;
1187 pr_err("%s: name change rollback failed: %d\n",
1196 * dev_set_alias - change ifalias of a device
1198 * @alias: name up to IFALIASZ
1199 * @len: limit of bytes to copy from info
1201 * Set ifalias for a device.
1203 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1209 if (len >= IFALIASZ)
1213 kfree(dev->ifalias);
1214 dev->ifalias = NULL;
1218 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1221 dev->ifalias = new_ifalias;
1223 strlcpy(dev->ifalias, alias, len+1);
1229 * netdev_features_change - device changes features
1230 * @dev: device to cause notification
1232 * Called to indicate a device has changed features.
1234 void netdev_features_change(struct net_device *dev)
1236 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1238 EXPORT_SYMBOL(netdev_features_change);
1241 * netdev_state_change - device changes state
1242 * @dev: device to cause notification
1244 * Called to indicate a device has changed state. This function calls
1245 * the notifier chains for netdev_chain and sends a NEWLINK message
1246 * to the routing socket.
1248 void netdev_state_change(struct net_device *dev)
1250 if (dev->flags & IFF_UP) {
1251 struct netdev_notifier_change_info change_info;
1253 change_info.flags_changed = 0;
1254 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1256 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1259 EXPORT_SYMBOL(netdev_state_change);
1262 * netdev_notify_peers - notify network peers about existence of @dev
1263 * @dev: network device
1265 * Generate traffic such that interested network peers are aware of
1266 * @dev, such as by generating a gratuitous ARP. This may be used when
1267 * a device wants to inform the rest of the network about some sort of
1268 * reconfiguration such as a failover event or virtual machine
1271 void netdev_notify_peers(struct net_device *dev)
1274 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1277 EXPORT_SYMBOL(netdev_notify_peers);
1279 static int __dev_open(struct net_device *dev)
1281 const struct net_device_ops *ops = dev->netdev_ops;
1286 if (!netif_device_present(dev))
1289 /* Block netpoll from trying to do any rx path servicing.
1290 * If we don't do this there is a chance ndo_poll_controller
1291 * or ndo_poll may be running while we open the device
1293 netpoll_poll_disable(dev);
1295 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1296 ret = notifier_to_errno(ret);
1300 set_bit(__LINK_STATE_START, &dev->state);
1302 if (ops->ndo_validate_addr)
1303 ret = ops->ndo_validate_addr(dev);
1305 if (!ret && ops->ndo_open)
1306 ret = ops->ndo_open(dev);
1308 netpoll_poll_enable(dev);
1311 clear_bit(__LINK_STATE_START, &dev->state);
1313 dev->flags |= IFF_UP;
1314 dev_set_rx_mode(dev);
1316 add_device_randomness(dev->dev_addr, dev->addr_len);
1323 * dev_open - prepare an interface for use.
1324 * @dev: device to open
1326 * Takes a device from down to up state. The device's private open
1327 * function is invoked and then the multicast lists are loaded. Finally
1328 * the device is moved into the up state and a %NETDEV_UP message is
1329 * sent to the netdev notifier chain.
1331 * Calling this function on an active interface is a nop. On a failure
1332 * a negative errno code is returned.
1334 int dev_open(struct net_device *dev)
1338 if (dev->flags & IFF_UP)
1341 ret = __dev_open(dev);
1345 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1346 call_netdevice_notifiers(NETDEV_UP, dev);
1350 EXPORT_SYMBOL(dev_open);
1352 static int __dev_close_many(struct list_head *head)
1354 struct net_device *dev;
1359 list_for_each_entry(dev, head, close_list) {
1360 /* Temporarily disable netpoll until the interface is down */
1361 netpoll_poll_disable(dev);
1363 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1365 clear_bit(__LINK_STATE_START, &dev->state);
1367 /* Synchronize to scheduled poll. We cannot touch poll list, it
1368 * can even be on a different cpu. So just clear netif_running().
1370 * dev->stop() will invoke napi_disable() on all of its
1371 * napi_struct instances on this device.
1373 smp_mb__after_atomic(); /* Commit netif_running(). */
1376 dev_deactivate_many(head);
1378 list_for_each_entry(dev, head, close_list) {
1379 const struct net_device_ops *ops = dev->netdev_ops;
1382 * Call the device specific close. This cannot fail.
1383 * Only if device is UP
1385 * We allow it to be called even after a DETACH hot-plug event.
1391 dev->flags &= ~IFF_UP;
1392 netpoll_poll_enable(dev);
1398 static int __dev_close(struct net_device *dev)
1403 list_add(&dev->close_list, &single);
1404 retval = __dev_close_many(&single);
1410 int dev_close_many(struct list_head *head, bool unlink)
1412 struct net_device *dev, *tmp;
1414 /* Remove the devices that don't need to be closed */
1415 list_for_each_entry_safe(dev, tmp, head, close_list)
1416 if (!(dev->flags & IFF_UP))
1417 list_del_init(&dev->close_list);
1419 __dev_close_many(head);
1421 list_for_each_entry_safe(dev, tmp, head, close_list) {
1422 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1423 call_netdevice_notifiers(NETDEV_DOWN, dev);
1425 list_del_init(&dev->close_list);
1430 EXPORT_SYMBOL(dev_close_many);
1433 * dev_close - shutdown an interface.
1434 * @dev: device to shutdown
1436 * This function moves an active device into down state. A
1437 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1438 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier chain.
1441 int dev_close(struct net_device *dev)
1443 if (dev->flags & IFF_UP) {
1446 list_add(&dev->close_list, &single);
1447 dev_close_many(&single, true);
1452 EXPORT_SYMBOL(dev_close);
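/* Illustrative sketch (not part of this file): administratively toggling an
 * interface from kernel code.  Both calls must run under the RTNL lock.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);	// no-op if the device is already IFF_UP
 *	// ...
 *	dev_close(dev);		// no-op if the device is already down
 *	rtnl_unlock();
 */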
1456 * dev_disable_lro - disable Large Receive Offload on a device
1459 * Disable Large Receive Offload (LRO) on a net device. Must be
1460 * called under RTNL. This is needed if received packets may be
1461 * forwarded to another interface.
1463 void dev_disable_lro(struct net_device *dev)
1465 struct net_device *lower_dev;
1466 struct list_head *iter;
1468 dev->wanted_features &= ~NETIF_F_LRO;
1469 netdev_update_features(dev);
1471 if (unlikely(dev->features & NETIF_F_LRO))
1472 netdev_WARN(dev, "failed to disable LRO!\n");
1474 netdev_for_each_lower_dev(dev, lower_dev, iter)
1475 dev_disable_lro(lower_dev);
1477 EXPORT_SYMBOL(dev_disable_lro);
1479 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1480 struct net_device *dev)
1482 struct netdev_notifier_info info;
1484 netdev_notifier_info_init(&info, dev);
1485 return nb->notifier_call(nb, val, &info);
1488 static int dev_boot_phase = 1;
1491 * register_netdevice_notifier - register a network notifier block
1494 * Register a notifier to be called when network device events occur.
1495 * The notifier passed is linked into the kernel structures and must
1496 * not be reused until it has been unregistered. A negative errno code
1497 * is returned on a failure.
1499 * When registered, all registration and up events are replayed
1500 * to the new notifier to allow the device to have a race-free
1501 * view of the network device list.
1504 int register_netdevice_notifier(struct notifier_block *nb)
1506 struct net_device *dev;
1507 struct net_device *last;
1512 err = raw_notifier_chain_register(&netdev_chain, nb);
1518 for_each_netdev(net, dev) {
1519 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1520 err = notifier_to_errno(err);
1524 if (!(dev->flags & IFF_UP))
1527 call_netdevice_notifier(nb, NETDEV_UP, dev);
1538 for_each_netdev(net, dev) {
1542 if (dev->flags & IFF_UP) {
1543 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1545 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1547 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1552 raw_notifier_chain_unregister(&netdev_chain, nb);
1555 EXPORT_SYMBOL(register_netdevice_notifier);
1558 * unregister_netdevice_notifier - unregister a network notifier block
1561 * Unregister a notifier previously registered by
1562 * register_netdevice_notifier(). The notifier is unlinked from the
1563 * kernel structures and may then be reused. A negative errno code
1564 * is returned on a failure.
1566 * After unregistering unregister and down device events are synthesized
1567 * for all devices on the device list to the removed notifier to remove
1568 * the need for special case cleanup code.
1571 int unregister_netdevice_notifier(struct notifier_block *nb)
1573 struct net_device *dev;
1578 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1583 for_each_netdev(net, dev) {
1584 if (dev->flags & IFF_UP) {
1585 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1587 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1589 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1596 EXPORT_SYMBOL(unregister_netdevice_notifier);
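/* Illustrative sketch (not part of this file): a subsystem watching device
 * events via the notifier chain.  All names here are invented.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	// register_netdevice_notifier(&example_nb);
 *	// ...
 *	// unregister_netdevice_notifier(&example_nb);
 */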
1599 * call_netdevice_notifiers_info - call all network notifier blocks
1600 * @val: value passed unmodified to notifier function
1601 * @dev: net_device pointer passed unmodified to notifier function
1602 * @info: notifier information data
1604 * Call all network notifier blocks. Parameters and return value
1605 * are as for raw_notifier_call_chain().
1608 static int call_netdevice_notifiers_info(unsigned long val,
1609 struct net_device *dev,
1610 struct netdev_notifier_info *info)
1613 netdev_notifier_info_init(info, dev);
1614 return raw_notifier_call_chain(&netdev_chain, val, info);
1618 * call_netdevice_notifiers - call all network notifier blocks
1619 * @val: value passed unmodified to notifier function
1620 * @dev: net_device pointer passed unmodified to notifier function
1622 * Call all network notifier blocks. Parameters and return value
1623 * are as for raw_notifier_call_chain().
1626 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1628 struct netdev_notifier_info info;
1630 return call_netdevice_notifiers_info(val, dev, &info);
1632 EXPORT_SYMBOL(call_netdevice_notifiers);
1634 #ifdef CONFIG_NET_INGRESS
1635 static struct static_key ingress_needed __read_mostly;
1637 void net_inc_ingress_queue(void)
1639 static_key_slow_inc(&ingress_needed);
1641 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1643 void net_dec_ingress_queue(void)
1645 static_key_slow_dec(&ingress_needed);
1647 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1650 static struct static_key netstamp_needed __read_mostly;
1651 #ifdef HAVE_JUMP_LABEL
1652 /* We are not allowed to call static_key_slow_dec() from irq context
1653 * If net_disable_timestamp() is called from irq context, defer the
1654 * static_key_slow_dec() calls.
1656 static atomic_t netstamp_needed_deferred;
1659 void net_enable_timestamp(void)
1661 #ifdef HAVE_JUMP_LABEL
1662 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1666 static_key_slow_dec(&netstamp_needed);
1670 static_key_slow_inc(&netstamp_needed);
1672 EXPORT_SYMBOL(net_enable_timestamp);
1674 void net_disable_timestamp(void)
1676 #ifdef HAVE_JUMP_LABEL
1677 if (in_interrupt()) {
1678 atomic_inc(&netstamp_needed_deferred);
1682 static_key_slow_dec(&netstamp_needed);
1684 EXPORT_SYMBOL(net_disable_timestamp);
1686 static inline void net_timestamp_set(struct sk_buff *skb)
1688 skb->tstamp.tv64 = 0;
1689 if (static_key_false(&netstamp_needed))
1690 __net_timestamp(skb);
1693 #define net_timestamp_check(COND, SKB) \
1694 if (static_key_false(&netstamp_needed)) { \
1695 if ((COND) && !(SKB)->tstamp.tv64) \
1696 __net_timestamp(SKB); \
1699 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1703 if (!(dev->flags & IFF_UP))
1706 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1707 if (skb->len <= len)
1710 /* if TSO is enabled, we don't care about the length as the packet
1711 * could be forwarded without being segmented before
1713 if (skb_is_gso(skb))
1718 EXPORT_SYMBOL_GPL(is_skb_forwardable);
1720 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1722 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1723 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1724 atomic_long_inc(&dev->rx_dropped);
1730 if (unlikely(!is_skb_forwardable(dev, skb))) {
1731 atomic_long_inc(&dev->rx_dropped);
1736 skb_scrub_packet(skb, true);
1738 skb->protocol = eth_type_trans(skb, dev);
1739 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1743 EXPORT_SYMBOL_GPL(__dev_forward_skb);
1746 * dev_forward_skb - loopback an skb to another netif
1748 * @dev: destination network device
1749 * @skb: buffer to forward
1752 * NET_RX_SUCCESS (no congestion)
1753 * NET_RX_DROP (packet was dropped, but freed)
1755 * dev_forward_skb can be used for injecting an skb from the
1756 * start_xmit function of one device into the receive queue
1757 * of another device.
1759 * The receiving device may be in another namespace, so
1760 * we have to clear all information in the skb that could
1761 * impact namespace isolation.
1763 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1765 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1767 EXPORT_SYMBOL_GPL(dev_forward_skb);
1769 static inline int deliver_skb(struct sk_buff *skb,
1770 struct packet_type *pt_prev,
1771 struct net_device *orig_dev)
1773 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1775 atomic_inc(&skb->users);
1776 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1779 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1780 struct packet_type **pt,
1781 struct net_device *orig_dev,
1783 struct list_head *ptype_list)
1785 struct packet_type *ptype, *pt_prev = *pt;
1787 list_for_each_entry_rcu(ptype, ptype_list, list) {
1788 if (ptype->type != type)
1791 deliver_skb(skb, pt_prev, orig_dev);
1797 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1799 if (!ptype->af_packet_priv || !skb->sk)
1802 if (ptype->id_match)
1803 return ptype->id_match(ptype, skb->sk);
1804 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1811 * Support routine. Sends outgoing frames to any network
1812 * taps currently in use.
1815 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1817 struct packet_type *ptype;
1818 struct sk_buff *skb2 = NULL;
1819 struct packet_type *pt_prev = NULL;
1820 struct list_head *ptype_list = &ptype_all;
1824 list_for_each_entry_rcu(ptype, ptype_list, list) {
1825 /* Never send packets back to the socket
1826 * they originated from - MvS (miquels@drinkel.ow.org)
1828 if (skb_loop_sk(ptype, skb))
1832 deliver_skb(skb2, pt_prev, skb->dev);
1837 /* need to clone skb, done only once */
1838 skb2 = skb_clone(skb, GFP_ATOMIC);
1842 net_timestamp_set(skb2);
1844 /* skb->nh should be correctly
1845 * set by sender, so that the second statement is
1846 * just protection against buggy protocols.
1848 skb_reset_mac_header(skb2);
1850 if (skb_network_header(skb2) < skb2->data ||
1851 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1852 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1853 ntohs(skb2->protocol),
1855 skb_reset_network_header(skb2);
1858 skb2->transport_header = skb2->network_header;
1859 skb2->pkt_type = PACKET_OUTGOING;
1863 if (ptype_list == &ptype_all) {
1864 ptype_list = &dev->ptype_all;
1869 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1874 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1875 * @dev: Network device
1876 * @txq: number of queues available
1878 * If real_num_tx_queues is changed the tc mappings may no longer be
1879 * valid. To resolve this, verify that the tc mapping remains valid and,
1880 * if not, NULL the mapping. With no priorities mapping to this
1881 * offset/count pair it will no longer be used. In the worst case, if TC0
1882 * is invalid, nothing can be done, so disable priority mappings. It is
1883 * expected that drivers will fix this mapping if they can before
1884 * calling netif_set_real_num_tx_queues.
1886 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1889 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1891 /* If TC0 is invalidated disable TC mapping */
1892 if (tc->offset + tc->count > txq) {
1893 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1898 /* Invalidated prio to tc mappings set to TC0 */
1899 for (i = 1; i < TC_BITMASK + 1; i++) {
1900 int q = netdev_get_prio_tc_map(dev, i);
1902 tc = &dev->tc_to_txq[q];
1903 if (tc->offset + tc->count > txq) {
1904 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1906 netdev_set_prio_tc_map(dev, i, 0);
1912 static DEFINE_MUTEX(xps_map_mutex);
1913 #define xmap_dereference(P) \
1914 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1916 static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1919 struct xps_map *map = NULL;
1923 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1925 for (pos = 0; map && pos < map->len; pos++) {
1926 if (map->queues[pos] == index) {
1928 map->queues[pos] = map->queues[--map->len];
1930 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1931 kfree_rcu(map, rcu);
1941 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1943 struct xps_dev_maps *dev_maps;
1945 bool active = false;
1947 mutex_lock(&xps_map_mutex);
1948 dev_maps = xmap_dereference(dev->xps_maps);
1953 for_each_possible_cpu(cpu) {
1954 for (i = index; i < dev->num_tx_queues; i++) {
1955 if (!remove_xps_queue(dev_maps, cpu, i))
1958 if (i == dev->num_tx_queues)
1963 RCU_INIT_POINTER(dev->xps_maps, NULL);
1964 kfree_rcu(dev_maps, rcu);
1967 for (i = index; i < dev->num_tx_queues; i++)
1968 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1972 mutex_unlock(&xps_map_mutex);
1975 static struct xps_map *expand_xps_map(struct xps_map *map,
1978 struct xps_map *new_map;
1979 int alloc_len = XPS_MIN_MAP_ALLOC;
1982 for (pos = 0; map && pos < map->len; pos++) {
1983 if (map->queues[pos] != index)
1988 /* Need to add queue to this CPU's existing map */
1990 if (pos < map->alloc_len)
1993 alloc_len = map->alloc_len * 2;
1996 /* Need to allocate new map to store queue on this CPU's map */
1997 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2002 for (i = 0; i < pos; i++)
2003 new_map->queues[i] = map->queues[i];
2004 new_map->alloc_len = alloc_len;
2010 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2013 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2014 struct xps_map *map, *new_map;
2015 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
2016 int cpu, numa_node_id = -2;
2017 bool active = false;
2019 mutex_lock(&xps_map_mutex);
2021 dev_maps = xmap_dereference(dev->xps_maps);
2023 /* allocate memory for queue storage */
2024 for_each_online_cpu(cpu) {
2025 if (!cpumask_test_cpu(cpu, mask))
2029 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2030 if (!new_dev_maps) {
2031 mutex_unlock(&xps_map_mutex);
2035 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2038 map = expand_xps_map(map, cpu, index);
2042 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2046 goto out_no_new_maps;
2048 for_each_possible_cpu(cpu) {
2049 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2050 /* add queue to CPU maps */
2053 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2054 while ((pos < map->len) && (map->queues[pos] != index))
2057 if (pos == map->len)
2058 map->queues[map->len++] = index;
2060 if (numa_node_id == -2)
2061 numa_node_id = cpu_to_node(cpu);
2062 else if (numa_node_id != cpu_to_node(cpu))
2065 } else if (dev_maps) {
2066 /* fill in the new device map from the old device map */
2067 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2068 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2073 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2075 /* Cleanup old maps */
2077 for_each_possible_cpu(cpu) {
2078 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2079 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2080 if (map && map != new_map)
2081 kfree_rcu(map, rcu);
2084 kfree_rcu(dev_maps, rcu);
2087 dev_maps = new_dev_maps;
2091 /* update Tx queue numa node */
2092 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2093 (numa_node_id >= 0) ? numa_node_id :
2099 /* removes queue from unused CPUs */
2100 for_each_possible_cpu(cpu) {
2101 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2104 if (remove_xps_queue(dev_maps, cpu, index))
2108 /* free map if not active */
2110 RCU_INIT_POINTER(dev->xps_maps, NULL);
2111 kfree_rcu(dev_maps, rcu);
2115 mutex_unlock(&xps_map_mutex);
2119 /* remove any maps that we added */
2120 for_each_possible_cpu(cpu) {
2121 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2122 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2124 if (new_map && new_map != map)
2128 mutex_unlock(&xps_map_mutex);
2130 kfree(new_dev_maps);
2133 EXPORT_SYMBOL(netif_set_xps_queue);
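/* Illustrative sketch (not part of this file): a multiqueue driver pinning
 * one transmit queue per online CPU.  The one-to-one queue/CPU pairing is
 * arbitrary and assumes the device has at least that many tx queues.
 *
 *	int cpu;
 *	u16 queue = 0;
 *
 *	for_each_online_cpu(cpu)
 *		netif_set_xps_queue(dev, cpumask_of(cpu), queue++);
 */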
2137 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2138 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2140 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2144 if (txq < 1 || txq > dev->num_tx_queues)
2147 if (dev->reg_state == NETREG_REGISTERED ||
2148 dev->reg_state == NETREG_UNREGISTERING) {
2151 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2157 netif_setup_tc(dev, txq);
2159 if (txq < dev->real_num_tx_queues) {
2160 qdisc_reset_all_tx_gt(dev, txq);
2162 netif_reset_xps_queues_gt(dev, txq);
2167 dev->real_num_tx_queues = txq;
2170 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2174 * netif_set_real_num_rx_queues - set actual number of RX queues used
2175 * @dev: Network device
2176 * @rxq: Actual number of RX queues
2178 * This must be called either with the rtnl_lock held or before
2179 * registration of the net device. Returns 0 on success, or a
2180 * negative error code. If called before registration, it always succeeds.
2183 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2187 if (rxq < 1 || rxq > dev->num_rx_queues)
2190 if (dev->reg_state == NETREG_REGISTERED) {
2193 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2199 dev->real_num_rx_queues = rxq;
2202 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
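/* Illustrative sketch (not part of this file): a driver that allocated the
 * maximum number of queues up front and now trims the active count, e.g. to
 * the default RSS limit returned by netif_get_num_default_rss_queues() below.
 * Assumes at least that many rx queues exist; must run under RTNL or before
 * registration.
 *
 *	unsigned int n = min_t(unsigned int, dev->num_tx_queues,
 *			       netif_get_num_default_rss_queues());
 *
 *	err = netif_set_real_num_tx_queues(dev, n);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, n);
 */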
2206 * netif_get_num_default_rss_queues - default number of RSS queues
2208 * This routine should set an upper limit on the number of RSS queues
2209 * used by default by multiqueue devices.
2211 int netif_get_num_default_rss_queues(void)
2213 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2215 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2217 static inline void __netif_reschedule(struct Qdisc *q)
2219 struct softnet_data *sd;
2220 unsigned long flags;
2222 local_irq_save(flags);
2223 sd = this_cpu_ptr(&softnet_data);
2224 q->next_sched = NULL;
2225 *sd->output_queue_tailp = q;
2226 sd->output_queue_tailp = &q->next_sched;
2227 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2228 local_irq_restore(flags);
2231 void __netif_schedule(struct Qdisc *q)
2233 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2234 __netif_reschedule(q);
2236 EXPORT_SYMBOL(__netif_schedule);
2238 struct dev_kfree_skb_cb {
2239 enum skb_free_reason reason;
2242 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2244 return (struct dev_kfree_skb_cb *)skb->cb;
2247 void netif_schedule_queue(struct netdev_queue *txq)
2250 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2251 struct Qdisc *q = rcu_dereference(txq->qdisc);
2253 __netif_schedule(q);
2257 EXPORT_SYMBOL(netif_schedule_queue);
2260 * netif_wake_subqueue - allow sending packets on subqueue
2261 * @dev: network device
2262 * @queue_index: sub queue index
2264 * Resume individual transmit queue of a device with multiple transmit queues.
2266 void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2268 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2270 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2274 q = rcu_dereference(txq->qdisc);
2275 __netif_schedule(q);
2279 EXPORT_SYMBOL(netif_wake_subqueue);
2281 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2283 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2287 q = rcu_dereference(dev_queue->qdisc);
2288 __netif_schedule(q);
2292 EXPORT_SYMBOL(netif_tx_wake_queue);
2294 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2296 unsigned long flags;
2298 if (likely(atomic_read(&skb->users) == 1)) {
2300 atomic_set(&skb->users, 0);
2301 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2304 get_kfree_skb_cb(skb)->reason = reason;
2305 local_irq_save(flags);
2306 skb->next = __this_cpu_read(softnet_data.completion_queue);
2307 __this_cpu_write(softnet_data.completion_queue, skb);
2308 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2309 local_irq_restore(flags);
2311 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2313 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2315 if (in_irq() || irqs_disabled())
2316 __dev_kfree_skb_irq(skb, reason);
2320 EXPORT_SYMBOL(__dev_kfree_skb_any);
2324 * netif_device_detach - mark device as removed
2325 * @dev: network device
2327 * Mark device as removed from system and therefore no longer available.
2329 void netif_device_detach(struct net_device *dev)
2331 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2332 netif_running(dev)) {
2333 netif_tx_stop_all_queues(dev);
2336 EXPORT_SYMBOL(netif_device_detach);
2339 * netif_device_attach - mark device as attached
2340 * @dev: network device
2342 * Mark device as attached to the system and restart if needed.
2344 void netif_device_attach(struct net_device *dev)
2346 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2347 netif_running(dev)) {
2348 netif_tx_wake_all_queues(dev);
2349 __netdev_watchdog_up(dev);
2352 EXPORT_SYMBOL(netif_device_attach);
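/* Example: detach/attach are typically paired in a driver's suspend/resume
 * path. A minimal sketch (my_hw_stop()/my_hw_start() are hypothetical
 * driver helpers):
 *
 *	netif_device_detach(netdev);	(stops all TX queues if running)
 *	my_hw_stop(priv);
 *	...
 *	my_hw_start(priv);
 *	netif_device_attach(netdev);	(wakes TX queues, rearms watchdog)
 */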
2355 * Returns a Tx hash based on the given packet descriptor and the number of
2356 * Tx queues to be used as a distribution range.
2358 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2359 unsigned int num_tx_queues)
2363 u16 qcount = num_tx_queues;
2365 if (skb_rx_queue_recorded(skb)) {
2366 hash = skb_get_rx_queue(skb);
2367 while (unlikely(hash >= num_tx_queues))
2368 hash -= num_tx_queues;
2373 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2374 qoffset = dev->tc_to_txq[tc].offset;
2375 qcount = dev->tc_to_txq[tc].count;
2378 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2380 EXPORT_SYMBOL(__skb_tx_hash);
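/* For example, with num_tx_queues = 8 and no traffic classes configured,
 * qcount is 8 and qoffset is 0, so the flow hash is scaled into [0, 7] via
 * reciprocal_scale(); a recorded RX queue, when present, is folded into the
 * same range instead of being rehashed.
 */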
2382 static void skb_warn_bad_offload(const struct sk_buff *skb)
2384 static const netdev_features_t null_features = 0;
2385 struct net_device *dev = skb->dev;
2386 const char *driver = "";
2388 if (!net_ratelimit())
2391 if (dev && dev->dev.parent)
2392 driver = dev_driver_string(dev->dev.parent);
2394 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2395 "gso_type=%d ip_summed=%d\n",
2396 driver, dev ? &dev->features : &null_features,
2397 skb->sk ? &skb->sk->sk_route_caps : &null_features,
2398 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2399 skb_shinfo(skb)->gso_type, skb->ip_summed);
2403 * Invalidate hardware checksum when packet is to be mangled, and
2404 * complete checksum manually on outgoing path.
2406 int skb_checksum_help(struct sk_buff *skb)
2409 int ret = 0, offset;
2411 if (skb->ip_summed == CHECKSUM_COMPLETE)
2412 goto out_set_summed;
2414 if (unlikely(skb_shinfo(skb)->gso_size)) {
2415 skb_warn_bad_offload(skb);
2419 /* Before computing a checksum, we should make sure no frag could
2420 * be modified by an external entity: the checksum could otherwise be wrong.
2422 if (skb_has_shared_frag(skb)) {
2423 ret = __skb_linearize(skb);
2428 offset = skb_checksum_start_offset(skb);
2429 BUG_ON(offset >= skb_headlen(skb));
2430 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2432 offset += skb->csum_offset;
2433 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2435 if (skb_cloned(skb) &&
2436 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2437 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2442 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2444 skb->ip_summed = CHECKSUM_NONE;
2448 EXPORT_SYMBOL(skb_checksum_help);
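/* Example: a driver that advertises NETIF_F_IP_CSUM but cannot checksum a
 * particular frame may fall back to software in its ndo_start_xmit. A
 * sketch (my_hw_can_csum() is a hypothetical driver helper):
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !my_hw_can_csum(skb) && skb_checksum_help(skb))
 *		goto drop;
 */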
2450 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2452 __be16 type = skb->protocol;
2454 /* Tunnel gso handlers can set protocol to ethernet. */
2455 if (type == htons(ETH_P_TEB)) {
2458 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2461 eth = (struct ethhdr *)skb_mac_header(skb);
2462 type = eth->h_proto;
2465 return __vlan_get_protocol(skb, type, depth);
2469 * skb_mac_gso_segment - mac layer segmentation handler.
2470 * @skb: buffer to segment
2471 * @features: features for the output path (see dev->features)
2473 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2474 netdev_features_t features)
2476 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2477 struct packet_offload *ptype;
2478 int vlan_depth = skb->mac_len;
2479 __be16 type = skb_network_protocol(skb, &vlan_depth);
2481 if (unlikely(!type))
2482 return ERR_PTR(-EINVAL);
2484 __skb_pull(skb, vlan_depth);
2487 list_for_each_entry_rcu(ptype, &offload_base, list) {
2488 if (ptype->type == type && ptype->callbacks.gso_segment) {
2489 segs = ptype->callbacks.gso_segment(skb, features);
2495 __skb_push(skb, skb->data - skb_mac_header(skb));
2499 EXPORT_SYMBOL(skb_mac_gso_segment);
2502 /* openvswitch calls this on rx path, so we need a different check.
2504 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2507 return skb->ip_summed != CHECKSUM_PARTIAL;
2509 return skb->ip_summed == CHECKSUM_NONE;
2513 * __skb_gso_segment - Perform segmentation on skb.
2514 * @skb: buffer to segment
2515 * @features: features for the output path (see dev->features)
2516 * @tx_path: whether it is called in TX path
2518 * This function segments the given skb and returns a list of segments.
2520 * It may return NULL if the skb requires no segmentation. This is
2521 * only possible when GSO is used for verifying header integrity.
2523 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2524 netdev_features_t features, bool tx_path)
2526 if (unlikely(skb_needs_check(skb, tx_path))) {
2529 skb_warn_bad_offload(skb);
2531 err = skb_cow_head(skb, 0);
2533 return ERR_PTR(err);
2536 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2537 SKB_GSO_CB(skb)->encap_level = 0;
2539 skb_reset_mac_header(skb);
2540 skb_reset_mac_len(skb);
2542 return skb_mac_gso_segment(skb, features);
2544 EXPORT_SYMBOL(__skb_gso_segment);
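/* Callers receive a list of segments chained via skb->next. A typical
 * consumer (a sketch) walks and transmits each segment:
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		... transmit or queue segs ...
 *		segs = next;
 *	}
 */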
2546 /* Take action when hardware reception checksum errors are detected. */
2548 void netdev_rx_csum_fault(struct net_device *dev)
2550 if (net_ratelimit()) {
2551 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2555 EXPORT_SYMBOL(netdev_rx_csum_fault);
2558 /* Actually, we should eliminate this check as soon as we know that:
2559 * 1. An IOMMU is present and can map all of the memory.
2560 * 2. No high memory really exists on this machine.
2563 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2565 #ifdef CONFIG_HIGHMEM
2567 if (!(dev->features & NETIF_F_HIGHDMA)) {
2568 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2569 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2570 if (PageHighMem(skb_frag_page(frag)))
2575 if (PCI_DMA_BUS_IS_PHYS) {
2576 struct device *pdev = dev->dev.parent;
2580 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2581 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2582 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2583 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2591 /* For an MPLS offload request, verify we are testing hardware MPLS features
2592 * instead of the standard features for the netdev.
2594 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2595 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2596 netdev_features_t features,
2599 if (eth_p_mpls(type))
2600 features &= skb->dev->mpls_features;
2605 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2606 netdev_features_t features,
2613 static netdev_features_t harmonize_features(struct sk_buff *skb,
2614 netdev_features_t features)
2619 type = skb_network_protocol(skb, &tmp);
2620 features = net_mpls_features(skb, features, type);
2622 if (skb->ip_summed != CHECKSUM_NONE &&
2623 !can_checksum_protocol(features, type)) {
2624 features &= ~NETIF_F_ALL_CSUM;
2625 } else if (illegal_highdma(skb->dev, skb)) {
2626 features &= ~NETIF_F_SG;
2632 netdev_features_t passthru_features_check(struct sk_buff *skb,
2633 struct net_device *dev,
2634 netdev_features_t features)
2638 EXPORT_SYMBOL(passthru_features_check);
2640 static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2641 struct net_device *dev,
2642 netdev_features_t features)
2644 return vlan_features_check(skb, features);
2647 netdev_features_t netif_skb_features(struct sk_buff *skb)
2649 struct net_device *dev = skb->dev;
2650 netdev_features_t features = dev->features;
2651 u16 gso_segs = skb_shinfo(skb)->gso_segs;
2653 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2654 features &= ~NETIF_F_GSO_MASK;
2656 /* For an encapsulation offload request, verify we are testing
2657 * hardware encapsulation features instead of the standard
2658 * features for the netdev.
2660 if (skb->encapsulation)
2661 features &= dev->hw_enc_features;
2663 if (skb_vlan_tagged(skb))
2664 features = netdev_intersect_features(features,
2665 dev->vlan_features |
2666 NETIF_F_HW_VLAN_CTAG_TX |
2667 NETIF_F_HW_VLAN_STAG_TX);
2669 if (dev->netdev_ops->ndo_features_check)
2670 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2673 features &= dflt_features_check(skb, dev, features);
2675 return harmonize_features(skb, features);
2677 EXPORT_SYMBOL(netif_skb_features);
2679 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2680 struct netdev_queue *txq, bool more)
2685 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2686 dev_queue_xmit_nit(skb, dev);
2689 trace_net_dev_start_xmit(skb, dev);
2690 rc = netdev_start_xmit(skb, dev, txq, more);
2691 trace_net_dev_xmit(skb, rc, dev, len);
2696 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2697 struct netdev_queue *txq, int *ret)
2699 struct sk_buff *skb = first;
2700 int rc = NETDEV_TX_OK;
2703 struct sk_buff *next = skb->next;
2706 rc = xmit_one(skb, dev, txq, next != NULL);
2707 if (unlikely(!dev_xmit_complete(rc))) {
2713 if (netif_xmit_stopped(txq) && skb) {
2714 rc = NETDEV_TX_BUSY;
2724 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2725 netdev_features_t features)
2727 if (skb_vlan_tag_present(skb) &&
2728 !vlan_hw_offload_capable(features, skb->vlan_proto))
2729 skb = __vlan_hwaccel_push_inside(skb);
2733 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2735 netdev_features_t features;
2740 features = netif_skb_features(skb);
2741 skb = validate_xmit_vlan(skb, features);
2745 if (netif_needs_gso(skb, features)) {
2746 struct sk_buff *segs;
2748 segs = skb_gso_segment(skb, features);
2756 if (skb_needs_linearize(skb, features) &&
2757 __skb_linearize(skb))
2760 /* If packet is not checksummed and device does not
2761 * support checksumming for this protocol, complete
2762 * checksumming here.
2764 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2765 if (skb->encapsulation)
2766 skb_set_inner_transport_header(skb,
2767 skb_checksum_start_offset(skb));
2769 skb_set_transport_header(skb,
2770 skb_checksum_start_offset(skb));
2771 if (!(features & NETIF_F_ALL_CSUM) &&
2772 skb_checksum_help(skb))
2785 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2787 struct sk_buff *next, *head = NULL, *tail;
2789 for (; skb != NULL; skb = next) {
2793 /* in case the skb won't be segmented, point it to itself */
2796 skb = validate_xmit_skb(skb, dev);
2804 /* If skb was segmented, skb->prev points to
2805 * the last segment. If not, it still contains skb.
2812 static void qdisc_pkt_len_init(struct sk_buff *skb)
2814 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2816 qdisc_skb_cb(skb)->pkt_len = skb->len;
2818 /* To get a more precise estimate of the bytes sent on the wire,
2819 * we add the header size of all segments to pkt_len
2821 if (shinfo->gso_size) {
2822 unsigned int hdr_len;
2823 u16 gso_segs = shinfo->gso_segs;
2825 /* mac layer + network layer */
2826 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2828 /* + transport layer */
2829 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2830 hdr_len += tcp_hdrlen(skb);
2832 hdr_len += sizeof(struct udphdr);
2834 if (shinfo->gso_type & SKB_GSO_DODGY)
2835 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2838 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
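/* Worked example (illustrative numbers): a TSO skb carrying 3 segments with
 * a 66 byte MAC+IP+TCP header has hdr_len = 66 and gso_segs = 3, so pkt_len
 * grows by (3 - 1) * 66 = 132 bytes beyond skb->len, approximating what the
 * three wire-level frames will actually consume.
 */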
2842 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2843 struct net_device *dev,
2844 struct netdev_queue *txq)
2846 spinlock_t *root_lock = qdisc_lock(q);
2850 qdisc_pkt_len_init(skb);
2851 qdisc_calculate_pkt_len(skb, q);
2853 * Heuristic to force contended enqueues to serialize on a
2854 * separate lock before trying to get qdisc main lock.
2855 * This permits __QDISC___STATE_RUNNING owner to get the lock more
2856 * often and dequeue packets faster.
2858 contended = qdisc_is_running(q);
2859 if (unlikely(contended))
2860 spin_lock(&q->busylock);
2862 spin_lock(root_lock);
2863 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2866 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2867 qdisc_run_begin(q)) {
2869 * This is a work-conserving queue; there are no old skbs
2870 * waiting to be sent out; and the qdisc is not running -
2871 * xmit the skb directly.
2874 qdisc_bstats_update(q, skb);
2876 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
2877 if (unlikely(contended)) {
2878 spin_unlock(&q->busylock);
2885 rc = NET_XMIT_SUCCESS;
2887 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2888 if (qdisc_run_begin(q)) {
2889 if (unlikely(contended)) {
2890 spin_unlock(&q->busylock);
2896 spin_unlock(root_lock);
2897 if (unlikely(contended))
2898 spin_unlock(&q->busylock);
2902 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2903 static void skb_update_prio(struct sk_buff *skb)
2905 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2907 if (!skb->priority && skb->sk && map) {
2908 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2910 if (prioidx < map->priomap_len)
2911 skb->priority = map->priomap[prioidx];
2915 #define skb_update_prio(skb)
2918 DEFINE_PER_CPU(int, xmit_recursion);
2919 EXPORT_SYMBOL(xmit_recursion);
2921 #define RECURSION_LIMIT 10
2924 * dev_loopback_xmit - loop back @skb
2925 * @skb: buffer to transmit
2927 int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
2929 skb_reset_mac_header(skb);
2930 __skb_pull(skb, skb_network_offset(skb));
2931 skb->pkt_type = PACKET_LOOPBACK;
2932 skb->ip_summed = CHECKSUM_UNNECESSARY;
2933 WARN_ON(!skb_dst(skb));
2938 EXPORT_SYMBOL(dev_loopback_xmit);
2940 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2943 struct xps_dev_maps *dev_maps;
2944 struct xps_map *map;
2945 int queue_index = -1;
2948 dev_maps = rcu_dereference(dev->xps_maps);
2950 map = rcu_dereference(
2951 dev_maps->cpu_map[skb->sender_cpu - 1]);
2954 queue_index = map->queues[0];
2956 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
2958 if (unlikely(queue_index >= dev->real_num_tx_queues))
2970 static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
2972 struct sock *sk = skb->sk;
2973 int queue_index = sk_tx_queue_get(sk);
2975 if (queue_index < 0 || skb->ooo_okay ||
2976 queue_index >= dev->real_num_tx_queues) {
2977 int new_index = get_xps_queue(dev, skb);
2979 new_index = skb_tx_hash(dev, skb);
2981 if (queue_index != new_index && sk &&
2982 rcu_access_pointer(sk->sk_dst_cache))
2983 sk_tx_queue_set(sk, new_index);
2985 queue_index = new_index;
2991 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2992 struct sk_buff *skb,
2995 int queue_index = 0;
2998 if (skb->sender_cpu == 0)
2999 skb->sender_cpu = raw_smp_processor_id() + 1;
3002 if (dev->real_num_tx_queues != 1) {
3003 const struct net_device_ops *ops = dev->netdev_ops;
3004 if (ops->ndo_select_queue)
3005 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3008 queue_index = __netdev_pick_tx(dev, skb);
3011 queue_index = netdev_cap_txqueue(dev, queue_index);
3014 skb_set_queue_mapping(skb, queue_index);
3015 return netdev_get_tx_queue(dev, queue_index);
3019 * __dev_queue_xmit - transmit a buffer
3020 * @skb: buffer to transmit
3021 * @accel_priv: private data used for L2 forwarding offload
3023 * Queue a buffer for transmission to a network device. The caller must
3024 * have set the device and priority and built the buffer before calling
3025 * this function. The function can be called from an interrupt.
3027 * A negative errno code is returned on a failure. A success does not
3028 * guarantee the frame will be transmitted as it may be dropped due
3029 * to congestion or traffic shaping.
3031 * -----------------------------------------------------------------------------------
3032 * I notice this method can also return errors from the queue disciplines,
3033 * including NET_XMIT_DROP, which is a positive value, so errors can also be positive.
3036 * Regardless of the return value, the skb is consumed, so it is currently
3037 * difficult to retry a send to this method. (You can bump the ref count
3038 * before sending to hold a reference for retry if you are careful.)
3040 * When calling this method, interrupts MUST be enabled. This is because
3041 * the BH enable code must have IRQs enabled so that it will not deadlock.
3044 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
3046 struct net_device *dev = skb->dev;
3047 struct netdev_queue *txq;
3051 skb_reset_mac_header(skb);
3053 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3054 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3056 /* Disable soft irqs for various locks below. Also
3057 * stops preemption for RCU.
3061 skb_update_prio(skb);
3063 /* If device/qdisc don't need skb->dst, release it right now while
3064 * its hot in this cpu cache.
3066 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3071 txq = netdev_pick_tx(dev, skb, accel_priv);
3072 q = rcu_dereference_bh(txq->qdisc);
3074 #ifdef CONFIG_NET_CLS_ACT
3075 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
3077 trace_net_dev_queue(skb);
3079 rc = __dev_xmit_skb(skb, q, dev, txq);
3083 /* The device has no queue. This is the common case for software devices:
3084 loopback and all sorts of tunnels...
3086 Really, it is unlikely that netif_tx_lock protection is necessary
3087 here (e.g. loopback and IP tunnels are clean, ignoring statistics
3089 counters). However, it is possible that they rely on that protection,
3092 so check this and take the lock. It is not prone to deadlocks.
3093 Or shoot the noqueue qdisc entirely; that is even simpler 8)
3095 if (dev->flags & IFF_UP) {
3096 int cpu = smp_processor_id(); /* ok because BHs are off */
3098 if (txq->xmit_lock_owner != cpu) {
3100 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
3101 goto recursion_alert;
3103 skb = validate_xmit_skb(skb, dev);
3107 HARD_TX_LOCK(dev, txq, cpu);
3109 if (!netif_xmit_stopped(txq)) {
3110 __this_cpu_inc(xmit_recursion);
3111 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3112 __this_cpu_dec(xmit_recursion);
3113 if (dev_xmit_complete(rc)) {
3114 HARD_TX_UNLOCK(dev, txq);
3118 HARD_TX_UNLOCK(dev, txq);
3119 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3122 /* Recursion is detected! It is possible, unfortunately. */
3126 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3133 rcu_read_unlock_bh();
3135 atomic_long_inc(&dev->tx_dropped);
3136 kfree_skb_list(skb);
3139 rcu_read_unlock_bh();
3143 int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
3145 return __dev_queue_xmit(skb, NULL);
3147 EXPORT_SYMBOL(dev_queue_xmit_sk);
3149 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3151 return __dev_queue_xmit(skb, accel_priv);
3153 EXPORT_SYMBOL(dev_queue_xmit_accel);
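/* Most callers go through the dev_queue_xmit() wrapper rather than these
 * helpers directly. A minimal transmit sketch (assuming the caller has
 * already built the headers):
 *
 *	skb->dev = dev;
 *	rc = dev_queue_xmit(skb);
 *	(the skb is consumed regardless of rc, as noted above)
 */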
3156 /*=======================================================================
			Receiver routines
3158 =======================================================================*/
3160 int netdev_max_backlog __read_mostly = 1000;
3161 EXPORT_SYMBOL(netdev_max_backlog);
3163 int netdev_tstamp_prequeue __read_mostly = 1;
3164 int netdev_budget __read_mostly = 300;
3165 int weight_p __read_mostly = 64; /* old backlog weight */
3167 /* Called with irq disabled */
3168 static inline void ____napi_schedule(struct softnet_data *sd,
3169 struct napi_struct *napi)
3171 list_add_tail(&napi->poll_list, &sd->poll_list);
3172 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3177 /* One global table that all flow-based protocols share. */
3178 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3179 EXPORT_SYMBOL(rps_sock_flow_table);
3180 u32 rps_cpu_mask __read_mostly;
3181 EXPORT_SYMBOL(rps_cpu_mask);
3183 struct static_key rps_needed __read_mostly;
3185 static struct rps_dev_flow *
3186 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3187 struct rps_dev_flow *rflow, u16 next_cpu)
3189 if (next_cpu < nr_cpu_ids) {
3190 #ifdef CONFIG_RFS_ACCEL
3191 struct netdev_rx_queue *rxqueue;
3192 struct rps_dev_flow_table *flow_table;
3193 struct rps_dev_flow *old_rflow;
3198 /* Should we steer this flow to a different hardware queue? */
3199 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3200 !(dev->features & NETIF_F_NTUPLE))
3202 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3203 if (rxq_index == skb_get_rx_queue(skb))
3206 rxqueue = dev->_rx + rxq_index;
3207 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3210 flow_id = skb_get_hash(skb) & flow_table->mask;
3211 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3212 rxq_index, flow_id);
3216 rflow = &flow_table->flows[flow_id];
3218 if (old_rflow->filter == rflow->filter)
3219 old_rflow->filter = RPS_NO_FILTER;
3223 per_cpu(softnet_data, next_cpu).input_queue_head;
3226 rflow->cpu = next_cpu;
3231 * get_rps_cpu is called from netif_receive_skb and returns the target
3232 * CPU from the RPS map of the receiving queue for a given skb.
3233 * rcu_read_lock must be held on entry.
3235 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3236 struct rps_dev_flow **rflowp)
3238 const struct rps_sock_flow_table *sock_flow_table;
3239 struct netdev_rx_queue *rxqueue = dev->_rx;
3240 struct rps_dev_flow_table *flow_table;
3241 struct rps_map *map;
3246 if (skb_rx_queue_recorded(skb)) {
3247 u16 index = skb_get_rx_queue(skb);
3249 if (unlikely(index >= dev->real_num_rx_queues)) {
3250 WARN_ONCE(dev->real_num_rx_queues > 1,
3251 "%s received packet on queue %u, but number "
3252 "of RX queues is %u\n",
3253 dev->name, index, dev->real_num_rx_queues);
3259 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3261 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3262 map = rcu_dereference(rxqueue->rps_map);
3263 if (!flow_table && !map)
3266 skb_reset_network_header(skb);
3267 hash = skb_get_hash(skb);
3271 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3272 if (flow_table && sock_flow_table) {
3273 struct rps_dev_flow *rflow;
3277 /* First check into global flow table if there is a match */
3278 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3279 if ((ident ^ hash) & ~rps_cpu_mask)
3282 next_cpu = ident & rps_cpu_mask;
3284 /* OK, now we know there is a match,
3285 * we can look at the local (per receive queue) flow table
3287 rflow = &flow_table->flows[hash & flow_table->mask];
3291 * If the desired CPU (where last recvmsg was done) is
3292 * different from current CPU (one in the rx-queue flow
3293 * table entry), switch if one of the following holds:
3294 * - Current CPU is unset (>= nr_cpu_ids).
3295 * - Current CPU is offline.
3296 * - The current CPU's queue tail has advanced beyond the
3297 * last packet that was enqueued using this table entry.
3298 * This guarantees that all previous packets for the flow
3299 * have been dequeued, thus preserving in order delivery.
3301 if (unlikely(tcpu != next_cpu) &&
3302 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
3303 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3304 rflow->last_qtail)) >= 0)) {
3306 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3309 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
3319 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3320 if (cpu_online(tcpu)) {
3330 #ifdef CONFIG_RFS_ACCEL
3333 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3334 * @dev: Device on which the filter was set
3335 * @rxq_index: RX queue index
3336 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3337 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3339 * Drivers that implement ndo_rx_flow_steer() should periodically call
3340 * this function for each installed filter and remove the filters for
3341 * which it returns %true.
3343 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3344 u32 flow_id, u16 filter_id)
3346 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3347 struct rps_dev_flow_table *flow_table;
3348 struct rps_dev_flow *rflow;
3353 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3354 if (flow_table && flow_id <= flow_table->mask) {
3355 rflow = &flow_table->flows[flow_id];
3356 cpu = ACCESS_ONCE(rflow->cpu);
3357 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
3358 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3359 rflow->last_qtail) <
3360 (int)(10 * flow_table->mask)))
3366 EXPORT_SYMBOL(rps_may_expire_flow);
3368 #endif /* CONFIG_RFS_ACCEL */
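/* Drivers with accelerated RFS typically scan their installed filters from a
 * periodic work item, e.g. (a sketch; the filter bookkeeping is driver
 * specific):
 *
 *	if (rps_may_expire_flow(dev, rxq_index, flow_id, filter_id))
 *		... remove the hardware steering rule for this flow ...
 */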
3370 /* Called from hardirq (IPI) context */
3371 static void rps_trigger_softirq(void *data)
3373 struct softnet_data *sd = data;
3375 ____napi_schedule(sd, &sd->backlog);
3379 #endif /* CONFIG_RPS */
3382 * Check if this softnet_data structure belongs to another cpu.
3383 * If yes, queue it to our IPI list and return 1.
3386 static int rps_ipi_queued(struct softnet_data *sd)
3389 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3392 sd->rps_ipi_next = mysd->rps_ipi_list;
3393 mysd->rps_ipi_list = sd;
3395 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3398 #endif /* CONFIG_RPS */
3402 #ifdef CONFIG_NET_FLOW_LIMIT
3403 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3406 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3408 #ifdef CONFIG_NET_FLOW_LIMIT
3409 struct sd_flow_limit *fl;
3410 struct softnet_data *sd;
3411 unsigned int old_flow, new_flow;
3413 if (qlen < (netdev_max_backlog >> 1))
3416 sd = this_cpu_ptr(&softnet_data);
3419 fl = rcu_dereference(sd->flow_limit);
3421 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3422 old_flow = fl->history[fl->history_head];
3423 fl->history[fl->history_head] = new_flow;
3426 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3428 if (likely(fl->buckets[old_flow]))
3429 fl->buckets[old_flow]--;
3431 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3443 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3444 * queue (may be a remote CPU queue).
3446 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3447 unsigned int *qtail)
3449 struct softnet_data *sd;
3450 unsigned long flags;
3453 sd = &per_cpu(softnet_data, cpu);
3455 local_irq_save(flags);
3458 qlen = skb_queue_len(&sd->input_pkt_queue);
3459 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3462 __skb_queue_tail(&sd->input_pkt_queue, skb);
3463 input_queue_tail_incr_save(sd, qtail);
3465 local_irq_restore(flags);
3466 return NET_RX_SUCCESS;
3469 /* Schedule NAPI for the backlog device.
3470 * We can use a non-atomic operation since we own the queue lock.
3472 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3473 if (!rps_ipi_queued(sd))
3474 ____napi_schedule(sd, &sd->backlog);
3482 local_irq_restore(flags);
3484 atomic_long_inc(&skb->dev->rx_dropped);
3489 static int netif_rx_internal(struct sk_buff *skb)
3493 net_timestamp_check(netdev_tstamp_prequeue, skb);
3495 trace_netif_rx(skb);
3497 if (static_key_false(&rps_needed)) {
3498 struct rps_dev_flow voidflow, *rflow = &voidflow;
3504 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3506 cpu = smp_processor_id();
3508 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3516 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3523 * netif_rx - post buffer to the network code
3524 * @skb: buffer to post
3526 * This function receives a packet from a device driver and queues it for
3527 * the upper (protocol) levels to process. It always succeeds. The buffer
3528 * may be dropped during processing for congestion control or by the protocol layers.
3532 * NET_RX_SUCCESS (no congestion)
3533 * NET_RX_DROP (packet was dropped)
3537 int netif_rx(struct sk_buff *skb)
3539 trace_netif_rx_entry(skb);
3541 return netif_rx_internal(skb);
3543 EXPORT_SYMBOL(netif_rx);
3545 int netif_rx_ni(struct sk_buff *skb)
3549 trace_netif_rx_ni_entry(skb);
3552 err = netif_rx_internal(skb);
3553 if (local_softirq_pending())
3559 EXPORT_SYMBOL(netif_rx_ni);
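/* Legacy (non-NAPI) drivers hand received frames to the stack with
 * netif_rx() from their interrupt handler, or with netif_rx_ni() from
 * process context. A sketch of the usual pattern:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */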
3561 static void net_tx_action(struct softirq_action *h)
3563 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3565 if (sd->completion_queue) {
3566 struct sk_buff *clist;
3568 local_irq_disable();
3569 clist = sd->completion_queue;
3570 sd->completion_queue = NULL;
3574 struct sk_buff *skb = clist;
3575 clist = clist->next;
3577 WARN_ON(atomic_read(&skb->users));
3578 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3579 trace_consume_skb(skb);
3581 trace_kfree_skb(skb, net_tx_action);
3586 if (sd->output_queue) {
3589 local_irq_disable();
3590 head = sd->output_queue;
3591 sd->output_queue = NULL;
3592 sd->output_queue_tailp = &sd->output_queue;
3596 struct Qdisc *q = head;
3597 spinlock_t *root_lock;
3599 head = head->next_sched;
3601 root_lock = qdisc_lock(q);
3602 if (spin_trylock(root_lock)) {
3603 smp_mb__before_atomic();
3604 clear_bit(__QDISC_STATE_SCHED,
3607 spin_unlock(root_lock);
3609 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3611 __netif_reschedule(q);
3613 smp_mb__before_atomic();
3614 clear_bit(__QDISC_STATE_SCHED,
3622 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3623 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3624 /* This hook is defined here for ATM LANE */
3625 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3626 unsigned char *addr) __read_mostly;
3627 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3630 #ifdef CONFIG_NET_CLS_ACT
3631 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3632 struct packet_type **pt_prev,
3633 int *ret, struct net_device *orig_dev)
3635 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3636 struct tcf_result cl_res;
3638 /* If there's at least one ingress present somewhere (so
3639 * we get here via enabled static key), remaining devices
3640 * that are not configured with an ingress qdisc will bail out here.
3646 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3650 qdisc_skb_cb(skb)->pkt_len = skb->len;
3651 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3652 qdisc_bstats_update_cpu(cl->q, skb);
3654 switch (tc_classify(skb, cl, &cl_res)) {
3656 case TC_ACT_RECLASSIFY:
3657 skb->tc_index = TC_H_MIN(cl_res.classid);
3660 qdisc_qstats_drop_cpu(cl->q);
3672 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3673 struct packet_type **pt_prev,
3674 int *ret, struct net_device *orig_dev)
3681 * netdev_rx_handler_register - register receive handler
3682 * @dev: device to register a handler for
3683 * @rx_handler: receive handler to register
3684 * @rx_handler_data: data pointer that is used by rx handler
3686 * Register a receive handler for a device. This handler will then be
3687 * called from __netif_receive_skb. A negative errno code is returned on a failure.
3690 * The caller must hold the rtnl_mutex.
3692 * For a general description of rx_handler, see enum rx_handler_result.
3694 int netdev_rx_handler_register(struct net_device *dev,
3695 rx_handler_func_t *rx_handler,
3696 void *rx_handler_data)
3700 if (dev->rx_handler)
3703 /* Note: rx_handler_data must be set before rx_handler */
3704 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3705 rcu_assign_pointer(dev->rx_handler, rx_handler);
3709 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3712 * netdev_rx_handler_unregister - unregister receive handler
3713 * @dev: device to unregister a handler from
3715 * Unregister a receive handler from a device.
3717 * The caller must hold the rtnl_mutex.
3719 void netdev_rx_handler_unregister(struct net_device *dev)
3723 RCU_INIT_POINTER(dev->rx_handler, NULL);
3724 /* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
3725 * section is guaranteed to also see a non-NULL rx_handler_data
3729 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3731 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
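/* Example: stacking drivers such as bridge or bonding claim a port with a
 * handler under RTNL. A sketch (my_handle_frame and port_priv are
 * hypothetical per-driver names):
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(port_dev, my_handle_frame, port_priv);
 *	rtnl_unlock();
 *
 * and release it with netdev_rx_handler_unregister(port_dev) on teardown.
 */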
3734 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3735 * the special handling of PFMEMALLOC skbs.
3737 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3739 switch (skb->protocol) {
3740 case htons(ETH_P_ARP):
3741 case htons(ETH_P_IP):
3742 case htons(ETH_P_IPV6):
3743 case htons(ETH_P_8021Q):
3744 case htons(ETH_P_8021AD):
3751 #ifdef CONFIG_NETFILTER_INGRESS
3752 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
3753 int *ret, struct net_device *orig_dev)
3755 if (nf_hook_ingress_active(skb)) {
3757 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3761 return nf_hook_ingress(skb);
3766 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
3767 int *ret, struct net_device *orig_dev)
3773 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3775 struct packet_type *ptype, *pt_prev;
3776 rx_handler_func_t *rx_handler;
3777 struct net_device *orig_dev;
3778 bool deliver_exact = false;
3779 int ret = NET_RX_DROP;
3782 net_timestamp_check(!netdev_tstamp_prequeue, skb);
3784 trace_netif_receive_skb(skb);
3786 orig_dev = skb->dev;
3788 skb_reset_network_header(skb);
3789 if (!skb_transport_header_was_set(skb))
3790 skb_reset_transport_header(skb);
3791 skb_reset_mac_len(skb);
3798 skb->skb_iif = skb->dev->ifindex;
3800 __this_cpu_inc(softnet_data.processed);
3802 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3803 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3804 skb = skb_vlan_untag(skb);
3809 #ifdef CONFIG_NET_CLS_ACT
3810 if (skb->tc_verd & TC_NCLS) {
3811 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3819 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3821 ret = deliver_skb(skb, pt_prev, orig_dev);
3825 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
3827 ret = deliver_skb(skb, pt_prev, orig_dev);
3832 #ifdef CONFIG_NET_INGRESS
3833 if (static_key_false(&ingress_needed)) {
3834 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3838 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
3842 #ifdef CONFIG_NET_CLS_ACT
3846 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3849 if (skb_vlan_tag_present(skb)) {
3851 ret = deliver_skb(skb, pt_prev, orig_dev);
3854 if (vlan_do_receive(&skb))
3856 else if (unlikely(!skb))
3860 rx_handler = rcu_dereference(skb->dev->rx_handler);
3863 ret = deliver_skb(skb, pt_prev, orig_dev);
3866 switch (rx_handler(&skb)) {
3867 case RX_HANDLER_CONSUMED:
3868 ret = NET_RX_SUCCESS;
3870 case RX_HANDLER_ANOTHER:
3872 case RX_HANDLER_EXACT:
3873 deliver_exact = true;
3874 case RX_HANDLER_PASS:
3881 if (unlikely(skb_vlan_tag_present(skb))) {
3882 if (skb_vlan_tag_get_id(skb))
3883 skb->pkt_type = PACKET_OTHERHOST;
3884 /* Note: we might in the future use prio bits
3885 * and set skb->priority like in vlan_do_receive()
3886 * For the time being, just ignore Priority Code Point
3891 type = skb->protocol;
3893 /* deliver only exact match when indicated */
3894 if (likely(!deliver_exact)) {
3895 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3896 &ptype_base[ntohs(type) &
3900 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3901 &orig_dev->ptype_specific);
3903 if (unlikely(skb->dev != orig_dev)) {
3904 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3905 &skb->dev->ptype_specific);
3909 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3912 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3915 atomic_long_inc(&skb->dev->rx_dropped);
3917 /* Jamal, now you will not be able to escape explaining
3918 * to me how you were going to use this. :-)
3928 static int __netif_receive_skb(struct sk_buff *skb)
3932 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3933 unsigned long pflags = current->flags;
3936 * PFMEMALLOC skbs are special, they should
3937 * - be delivered to SOCK_MEMALLOC sockets only
3938 * - stay away from userspace
3939 * - have bounded memory usage
3941 * Use PF_MEMALLOC as this saves us from propagating the allocation
3942 * context down to all allocation sites.
3944 current->flags |= PF_MEMALLOC;
3945 ret = __netif_receive_skb_core(skb, true);
3946 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3948 ret = __netif_receive_skb_core(skb, false);
3953 static int netif_receive_skb_internal(struct sk_buff *skb)
3955 net_timestamp_check(netdev_tstamp_prequeue, skb);
3957 if (skb_defer_rx_timestamp(skb))
3958 return NET_RX_SUCCESS;
3961 if (static_key_false(&rps_needed)) {
3962 struct rps_dev_flow voidflow, *rflow = &voidflow;
3967 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3970 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3977 return __netif_receive_skb(skb);
3981 * netif_receive_skb - process receive buffer from network
3982 * @skb: buffer to process
3984 * netif_receive_skb() is the main receive data processing function.
3985 * It always succeeds. The buffer may be dropped during processing
3986 * for congestion control or by the protocol layers.
3988 * This function may only be called from softirq context and interrupts
3989 * should be enabled.
3991 * Return values (usually ignored):
3992 * NET_RX_SUCCESS: no congestion
3993 * NET_RX_DROP: packet was dropped
3995 int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
3997 trace_netif_receive_skb_entry(skb);
3999 return netif_receive_skb_internal(skb);
4001 EXPORT_SYMBOL(netif_receive_skb_sk);
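/* NAPI drivers normally deliver frames from their poll routine with
 * napi_gro_receive() or the netif_receive_skb() wrapper; calling this from
 * hard interrupt context is not allowed (use netif_rx() there instead).
 */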
4003 /* Network device is going away, flush any packets still pending
4004 * Called with irqs disabled.
4006 static void flush_backlog(void *arg)
4008 struct net_device *dev = arg;
4009 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4010 struct sk_buff *skb, *tmp;
4013 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
4014 if (skb->dev == dev) {
4015 __skb_unlink(skb, &sd->input_pkt_queue);
4017 input_queue_head_incr(sd);
4022 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4023 if (skb->dev == dev) {
4024 __skb_unlink(skb, &sd->process_queue);
4026 input_queue_head_incr(sd);
4031 static int napi_gro_complete(struct sk_buff *skb)
4033 struct packet_offload *ptype;
4034 __be16 type = skb->protocol;
4035 struct list_head *head = &offload_base;
4038 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4040 if (NAPI_GRO_CB(skb)->count == 1) {
4041 skb_shinfo(skb)->gso_size = 0;
4046 list_for_each_entry_rcu(ptype, head, list) {
4047 if (ptype->type != type || !ptype->callbacks.gro_complete)
4050 err = ptype->callbacks.gro_complete(skb, 0);
4056 WARN_ON(&ptype->list == head);
4058 return NET_RX_SUCCESS;
4062 return netif_receive_skb_internal(skb);
4065 /* napi->gro_list contains packets ordered by age, with the
4066 * youngest packets at the head of the list.
4067 * Complete skbs in reverse order to reduce latencies.
4069 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
4071 struct sk_buff *skb, *prev = NULL;
4073 /* scan list and build reverse chain */
4074 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4079 for (skb = prev; skb; skb = prev) {
4082 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4086 napi_gro_complete(skb);
4090 napi->gro_list = NULL;
4092 EXPORT_SYMBOL(napi_gro_flush);
4094 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4097 unsigned int maclen = skb->dev->hard_header_len;
4098 u32 hash = skb_get_hash_raw(skb);
4100 for (p = napi->gro_list; p; p = p->next) {
4101 unsigned long diffs;
4103 NAPI_GRO_CB(p)->flush = 0;
4105 if (hash != skb_get_hash_raw(p)) {
4106 NAPI_GRO_CB(p)->same_flow = 0;
4110 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4111 diffs |= p->vlan_tci ^ skb->vlan_tci;
4112 if (maclen == ETH_HLEN)
4113 diffs |= compare_ether_header(skb_mac_header(p),
4114 skb_mac_header(skb));
4116 diffs = memcmp(skb_mac_header(p),
4117 skb_mac_header(skb),
4119 NAPI_GRO_CB(p)->same_flow = !diffs;
4123 static void skb_gro_reset_offset(struct sk_buff *skb)
4125 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4126 const skb_frag_t *frag0 = &pinfo->frags[0];
4128 NAPI_GRO_CB(skb)->data_offset = 0;
4129 NAPI_GRO_CB(skb)->frag0 = NULL;
4130 NAPI_GRO_CB(skb)->frag0_len = 0;
4132 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4134 !PageHighMem(skb_frag_page(frag0))) {
4135 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4136 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
4140 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4142 struct skb_shared_info *pinfo = skb_shinfo(skb);
4144 BUG_ON(skb->end - skb->tail < grow);
4146 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4148 skb->data_len -= grow;
4151 pinfo->frags[0].page_offset += grow;
4152 skb_frag_size_sub(&pinfo->frags[0], grow);
4154 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4155 skb_frag_unref(skb, 0);
4156 memmove(pinfo->frags, pinfo->frags + 1,
4157 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4161 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4163 struct sk_buff **pp = NULL;
4164 struct packet_offload *ptype;
4165 __be16 type = skb->protocol;
4166 struct list_head *head = &offload_base;
4168 enum gro_result ret;
4171 if (!(skb->dev->features & NETIF_F_GRO))
4174 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
4177 gro_list_prepare(napi, skb);
4180 list_for_each_entry_rcu(ptype, head, list) {
4181 if (ptype->type != type || !ptype->callbacks.gro_receive)
4184 skb_set_network_header(skb, skb_gro_offset(skb));
4185 skb_reset_mac_len(skb);
4186 NAPI_GRO_CB(skb)->same_flow = 0;
4187 NAPI_GRO_CB(skb)->flush = 0;
4188 NAPI_GRO_CB(skb)->free = 0;
4189 NAPI_GRO_CB(skb)->udp_mark = 0;
4190 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
4192 /* Setup for GRO checksum validation */
4193 switch (skb->ip_summed) {
4194 case CHECKSUM_COMPLETE:
4195 NAPI_GRO_CB(skb)->csum = skb->csum;
4196 NAPI_GRO_CB(skb)->csum_valid = 1;
4197 NAPI_GRO_CB(skb)->csum_cnt = 0;
4199 case CHECKSUM_UNNECESSARY:
4200 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4201 NAPI_GRO_CB(skb)->csum_valid = 0;
4204 NAPI_GRO_CB(skb)->csum_cnt = 0;
4205 NAPI_GRO_CB(skb)->csum_valid = 0;
4208 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4213 if (&ptype->list == head)
4216 same_flow = NAPI_GRO_CB(skb)->same_flow;
4217 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4220 struct sk_buff *nskb = *pp;
4224 napi_gro_complete(nskb);
4231 if (NAPI_GRO_CB(skb)->flush)
4234 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4235 struct sk_buff *nskb = napi->gro_list;
4237 /* locate the end of the list to select the 'oldest' flow */
4238 while (nskb->next) {
4244 napi_gro_complete(nskb);
4248 NAPI_GRO_CB(skb)->count = 1;
4249 NAPI_GRO_CB(skb)->age = jiffies;
4250 NAPI_GRO_CB(skb)->last = skb;
4251 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4252 skb->next = napi->gro_list;
4253 napi->gro_list = skb;
4257 grow = skb_gro_offset(skb) - skb_headlen(skb);
4259 gro_pull_from_frag0(skb, grow);
4268 struct packet_offload *gro_find_receive_by_type(__be16 type)
4270 struct list_head *offload_head = &offload_base;
4271 struct packet_offload *ptype;
4273 list_for_each_entry_rcu(ptype, offload_head, list) {
4274 if (ptype->type != type || !ptype->callbacks.gro_receive)
4280 EXPORT_SYMBOL(gro_find_receive_by_type);
4282 struct packet_offload *gro_find_complete_by_type(__be16 type)
4284 struct list_head *offload_head = &offload_base;
4285 struct packet_offload *ptype;
4287 list_for_each_entry_rcu(ptype, offload_head, list) {
4288 if (ptype->type != type || !ptype->callbacks.gro_complete)
4294 EXPORT_SYMBOL(gro_find_complete_by_type);
4296 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4300 if (netif_receive_skb_internal(skb))
4308 case GRO_MERGED_FREE:
4309 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4310 kmem_cache_free(skbuff_head_cache, skb);
4323 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4325 trace_napi_gro_receive_entry(skb);
4327 skb_gro_reset_offset(skb);
4329 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4331 EXPORT_SYMBOL(napi_gro_receive);
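/* Typical driver usage from the NAPI poll callback (a sketch; priv->napi is
 * a hypothetical driver field):
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(&priv->napi, skb);
 *
 * The return value may be inspected for statistics, but the skb must not be
 * touched afterwards since GRO may have merged or freed it.
 */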
4333 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4335 if (unlikely(skb->pfmemalloc)) {
4339 __skb_pull(skb, skb_headlen(skb));
4340 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4341 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4343 skb->dev = napi->dev;
4345 skb->encapsulation = 0;
4346 skb_shinfo(skb)->gso_type = 0;
4347 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4352 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4354 struct sk_buff *skb = napi->skb;
4357 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
4362 EXPORT_SYMBOL(napi_get_frags);
4364 static gro_result_t napi_frags_finish(struct napi_struct *napi,
4365 struct sk_buff *skb,
4371 __skb_push(skb, ETH_HLEN);
4372 skb->protocol = eth_type_trans(skb, skb->dev);
4373 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4378 case GRO_MERGED_FREE:
4379 napi_reuse_skb(napi, skb);
4389 /* The upper GRO stack assumes the network header starts at gro_offset=0.
4390 * Drivers could call both napi_gro_frags() and napi_gro_receive(),
4391 * so we copy the ethernet header into skb->data to have a common layout.
4393 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4395 struct sk_buff *skb = napi->skb;
4396 const struct ethhdr *eth;
4397 unsigned int hlen = sizeof(*eth);
4401 skb_reset_mac_header(skb);
4402 skb_gro_reset_offset(skb);
4404 eth = skb_gro_header_fast(skb, 0);
4405 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4406 eth = skb_gro_header_slow(skb, hlen, 0);
4407 if (unlikely(!eth)) {
4408 napi_reuse_skb(napi, skb);
4412 gro_pull_from_frag0(skb, hlen);
4413 NAPI_GRO_CB(skb)->frag0 += hlen;
4414 NAPI_GRO_CB(skb)->frag0_len -= hlen;
4416 __skb_pull(skb, hlen);
4419 * This works because the only protocols we care about don't require special handling.
4421 * We'll fix it up properly in napi_frags_finish()
4423 skb->protocol = eth->h_proto;
4428 gro_result_t napi_gro_frags(struct napi_struct *napi)
4430 struct sk_buff *skb = napi_frags_skb(napi);
4435 trace_napi_gro_frags_entry(skb);
4437 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4439 EXPORT_SYMBOL(napi_gro_frags);
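/* Drivers that receive into page fragments (no preallocated skb head) use
 * the napi_get_frags()/napi_gro_frags() pair instead; roughly (a sketch,
 * with page/offset/len/truesize coming from the driver's RX ring):
 *
 *	skb = napi_get_frags(napi);
 *	if (unlikely(!skb))
 *		... drop and recycle the pages ...
 *	skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 *	napi_gro_frags(napi);
 */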
4441 /* Compute the checksum from gro_offset and return the folded value
4442 * after adding in any pseudo checksum.
4444 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4449 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4451 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4452 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4454 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4455 !skb->csum_complete_sw)
4456 netdev_rx_csum_fault(skb->dev);
4459 NAPI_GRO_CB(skb)->csum = wsum;
4460 NAPI_GRO_CB(skb)->csum_valid = 1;
4464 EXPORT_SYMBOL(__skb_gro_checksum_complete);
4467 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
4468 * Note: called with local irq disabled, but exits with local irq enabled.
4470 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4473 struct softnet_data *remsd = sd->rps_ipi_list;
4476 sd->rps_ipi_list = NULL;
4480 /* Send pending IPI's to kick RPS processing on remote cpus. */
4482 struct softnet_data *next = remsd->rps_ipi_next;
4484 if (cpu_online(remsd->cpu))
4485 smp_call_function_single_async(remsd->cpu,
4494 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4497 return sd->rps_ipi_list != NULL;
4503 static int process_backlog(struct napi_struct *napi, int quota)
4506 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4508 /* Check if we have pending IPIs; it's better to send them now
4509 * rather than waiting for net_rx_action() to end.
4511 if (sd_has_rps_ipi_waiting(sd)) {
4512 local_irq_disable();
4513 net_rps_action_and_irq_enable(sd);
4516 napi->weight = weight_p;
4517 local_irq_disable();
4519 struct sk_buff *skb;
4521 while ((skb = __skb_dequeue(&sd->process_queue))) {
4523 __netif_receive_skb(skb);
4524 local_irq_disable();
4525 input_queue_head_incr(sd);
4526 if (++work >= quota) {
4533 if (skb_queue_empty(&sd->input_pkt_queue)) {
4535 * Inline a custom version of __napi_complete().
4536 * Only the current cpu owns and manipulates this napi,
4537 * and NAPI_STATE_SCHED is the only possible flag set.
4539 * We can use a plain write instead of clear_bit(),
4540 * and we don't need an smp_mb() memory barrier.
4548 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4549 &sd->process_queue);
4558 * __napi_schedule - schedule for receive
4559 * @n: entry to schedule
4561 * The entry's receive function will be scheduled to run.
4562 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
4564 void __napi_schedule(struct napi_struct *n)
4566 unsigned long flags;
4568 local_irq_save(flags);
4569 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4570 local_irq_restore(flags);
4572 EXPORT_SYMBOL(__napi_schedule);
4575 * __napi_schedule_irqoff - schedule for receive
4576 * @n: entry to schedule
4578 * Variant of __napi_schedule() assuming hard irqs are masked
4580 void __napi_schedule_irqoff(struct napi_struct *n)
4582 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4584 EXPORT_SYMBOL(__napi_schedule_irqoff);
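/* A device interrupt handler usually masks its RX interrupt and then defers
 * to softirq context via the napi_schedule() wrappers, e.g. (a sketch;
 * my_hw_disable_irq() is a hypothetical driver helper):
 *
 *	if (napi_schedule_prep(&priv->napi)) {
 *		my_hw_disable_irq(priv);
 *		__napi_schedule(&priv->napi);
 *	}
 */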
4586 void __napi_complete(struct napi_struct *n)
4588 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4590 list_del_init(&n->poll_list);
4591 smp_mb__before_atomic();
4592 clear_bit(NAPI_STATE_SCHED, &n->state);
4594 EXPORT_SYMBOL(__napi_complete);
4596 void napi_complete_done(struct napi_struct *n, int work_done)
4598 unsigned long flags;
4601 * don't let napi dequeue from the cpu poll list
4602 * just in case it's running on a different cpu
4604 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4608 unsigned long timeout = 0;
4611 timeout = n->dev->gro_flush_timeout;
4614 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4615 HRTIMER_MODE_REL_PINNED);
4617 napi_gro_flush(n, false);
4619 if (likely(list_empty(&n->poll_list))) {
4620 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4622 /* If n->poll_list is not empty, we need to mask irqs */
4623 local_irq_save(flags);
4625 local_irq_restore(flags);
4628 EXPORT_SYMBOL(napi_complete_done);
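/* Putting the pieces together, a poll callback commonly looks like this
 * sketch (my_rx_clean() and my_hw_enable_irq() are hypothetical driver
 * helpers; my_rx_clean() returns the number of packets processed):
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = my_rx_clean(napi, budget);
 *
 *		if (work < budget) {
 *			napi_complete_done(napi, work);
 *			my_hw_enable_irq(napi);
 *		}
 *		return work;
 *	}
 */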
4630 /* must be called under rcu_read_lock(), as we dont take a reference */
4631 struct napi_struct *napi_by_id(unsigned int napi_id)
4633 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4634 struct napi_struct *napi;
4636 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4637 if (napi->napi_id == napi_id)
4642 EXPORT_SYMBOL_GPL(napi_by_id);
4644 void napi_hash_add(struct napi_struct *napi)
4646 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4648 spin_lock(&napi_hash_lock);
4650 /* 0 is not a valid id; we also skip an id that is taken.
4651 * We expect both events to be extremely rare.
4654 while (!napi->napi_id) {
4655 napi->napi_id = ++napi_gen_id;
4656 if (napi_by_id(napi->napi_id))
4660 hlist_add_head_rcu(&napi->napi_hash_node,
4661 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4663 spin_unlock(&napi_hash_lock);
4666 EXPORT_SYMBOL_GPL(napi_hash_add);
4668 /* Warning: the caller is responsible for making sure an rcu grace period
4669 * is respected before freeing the memory containing @napi
4671 void napi_hash_del(struct napi_struct *napi)
4673 spin_lock(&napi_hash_lock);
4675 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4676 hlist_del_rcu(&napi->napi_hash_node);
4678 spin_unlock(&napi_hash_lock);
4680 EXPORT_SYMBOL_GPL(napi_hash_del);
4682 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4684 struct napi_struct *napi;
4686 napi = container_of(timer, struct napi_struct, timer);
4688 napi_schedule(napi);
4690 return HRTIMER_NORESTART;
4693 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4694 int (*poll)(struct napi_struct *, int), int weight)
4696 INIT_LIST_HEAD(&napi->poll_list);
4697 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4698 napi->timer.function = napi_watchdog;
4699 napi->gro_count = 0;
4700 napi->gro_list = NULL;
4703 if (weight > NAPI_POLL_WEIGHT)
4704 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4706 napi->weight = weight;
4707 list_add(&napi->dev_list, &dev->napi_list);
4709 #ifdef CONFIG_NETPOLL
4710 spin_lock_init(&napi->poll_lock);
4711 napi->poll_owner = -1;
4713 set_bit(NAPI_STATE_SCHED, &napi->state);
4715 EXPORT_SYMBOL(netif_napi_add);
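/* Registration happens once at probe time, e.g. (a sketch):
 *
 *	netif_napi_add(netdev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 *
 * followed by napi_enable() when the interface is opened, and napi_disable()
 * plus netif_napi_del() on the teardown path.
 */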
4717 void napi_disable(struct napi_struct *n)
4720 set_bit(NAPI_STATE_DISABLE, &n->state);
4722 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4725 hrtimer_cancel(&n->timer);
4727 clear_bit(NAPI_STATE_DISABLE, &n->state);
4729 EXPORT_SYMBOL(napi_disable);
4731 void netif_napi_del(struct napi_struct *napi)
4733 list_del_init(&napi->dev_list);
4734 napi_free_frags(napi);
4736 kfree_skb_list(napi->gro_list);
4737 napi->gro_list = NULL;
4738 napi->gro_count = 0;
4740 EXPORT_SYMBOL(netif_napi_del);
4742 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4747 list_del_init(&n->poll_list);
4749 have = netpoll_poll_lock(n);
4753 /* This NAPI_STATE_SCHED test is for avoiding a race
4754 * with netpoll's poll_napi(). Only the entity which
4755 * obtains the lock and sees NAPI_STATE_SCHED set will
4756 * actually make the ->poll() call. Therefore we avoid
4757 * accidentally calling ->poll() when NAPI is not scheduled.
4760 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4761 work = n->poll(n, weight);
4765 WARN_ON_ONCE(work > weight);
4767 if (likely(work < weight))
4770 /* Drivers must not modify the NAPI state if they
4771 * consume the entire weight. In such cases this code
4772 * still "owns" the NAPI instance and therefore can
4773 * move the instance around on the list at-will.
4775 if (unlikely(napi_disable_pending(n))) {
4781 /* Flush too-old packets.
4782 * If HZ < 1000, flush all packets.
4784 napi_gro_flush(n, HZ >= 1000);
4787 /* Some drivers may have called napi_schedule
4788 * prior to exhausting their budget.
4790 if (unlikely(!list_empty(&n->poll_list))) {
4791 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4792 n->dev ? n->dev->name : "backlog");
4796 list_add_tail(&n->poll_list, repoll);
4799 netpoll_poll_unlock(have);
4804 static void net_rx_action(struct softirq_action *h)
4806 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4807 unsigned long time_limit = jiffies + 2;
4808 int budget = netdev_budget;
4812 local_irq_disable();
4813 list_splice_init(&sd->poll_list, &list);
4817 struct napi_struct *n;
4819 if (list_empty(&list)) {
4820 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4825 n = list_first_entry(&list, struct napi_struct, poll_list);
4826 budget -= napi_poll(n, &repoll);
4828 /* If the softirq window is exhausted then punt.
4829 * Allow this to run for 2 jiffies, which will allow
4830 * an average latency of 1.5/HZ.
4832 if (unlikely(budget <= 0 ||
4833 time_after_eq(jiffies, time_limit))) {
4839 local_irq_disable();
4841 list_splice_tail_init(&sd->poll_list, &list);
4842 list_splice_tail(&repoll, &list);
4843 list_splice(&list, &sd->poll_list);
4844 if (!list_empty(&sd->poll_list))
4845 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4847 net_rps_action_and_irq_enable(sd);
4850 struct netdev_adjacent {
4851 struct net_device *dev;
4853 /* upper master flag, there can only be one master device per list */
4856 /* counter for the number of times this device was added to us */
4859 /* private field for the users */
4862 struct list_head list;
4863 struct rcu_head rcu;
4866 static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4867 struct net_device *adj_dev,
4868 struct list_head *adj_list)
4870 struct netdev_adjacent *adj;
4872 list_for_each_entry(adj, adj_list, list) {
4873 if (adj->dev == adj_dev)
4880 * netdev_has_upper_dev - Check if device is linked to an upper device
4882 * @upper_dev: upper device to check
4884 * Find out if a device is linked to the specified upper device and return true
4885 * in case it is. Note that this checks only the immediate upper device,
4886 * not the complete stack of devices. The caller must hold the RTNL lock.
4888 bool netdev_has_upper_dev(struct net_device *dev,
4889 struct net_device *upper_dev)
4893 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
4895 EXPORT_SYMBOL(netdev_has_upper_dev);
4898 * netdev_has_any_upper_dev - Check if device is linked to some device
4901 * Find out if a device is linked to an upper device and return true in case
4902 * it is. The caller must hold the RTNL lock.
4904 static bool netdev_has_any_upper_dev(struct net_device *dev)
4908 return !list_empty(&dev->all_adj_list.upper);
4912 * netdev_master_upper_dev_get - Get master upper device
4915 * Find a master upper device and return a pointer to it, or NULL in case
4916 * it's not there. The caller must hold the RTNL lock.
4918 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4920 struct netdev_adjacent *upper;
4924 if (list_empty(&dev->adj_list.upper))
4927 upper = list_first_entry(&dev->adj_list.upper,
4928 struct netdev_adjacent, list);
4929 if (likely(upper->master))
4933 EXPORT_SYMBOL(netdev_master_upper_dev_get);
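/* For example, a bonding slave's bond device is its master upper; under RTNL
 * a caller can do (a sketch):
 *
 *	struct net_device *master = netdev_master_upper_dev_get(slave_dev);
 *
 *	if (master)
 *		... slave_dev is enslaved to master ...
 */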
4935 void *netdev_adjacent_get_private(struct list_head *adj_list)
4937 struct netdev_adjacent *adj;
4939 adj = list_entry(adj_list, struct netdev_adjacent, list);
4941 return adj->private;
4943 EXPORT_SYMBOL(netdev_adjacent_get_private);
4946 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4948 * @iter: list_head ** of the current position
4950 * Gets the next device from the dev's upper list, starting from iter
4951 * position. The caller must hold RCU read lock.
4953 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4954 struct list_head **iter)
4956 struct netdev_adjacent *upper;
4958 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4960 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4962 if (&upper->list == &dev->adj_list.upper)
4965 *iter = &upper->list;
4969 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
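/* Illustrative sketch, not part of the original file: walking the immediate
 * upper devices of a device under the RCU read lock with the iterator above.
 * The iterator starts at &dev->adj_list.upper, the list the function
 * terminates on; example_print_uppers is a hypothetical helper.
 */
static void example_print_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter = &dev->adj_list.upper;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		pr_info("%s: upper device %s\n", dev->name, upper->name);
	rcu_read_unlock();
}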
4972 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4974 * @iter: list_head ** of the current position
4976 * Gets the next device from the dev's upper list, starting from iter
4977 * position. The caller must hold RCU read lock.
4979 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4980 struct list_head **iter)
4982 struct netdev_adjacent *upper;
4984 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4986 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4988 if (&upper->list == &dev->all_adj_list.upper)
4991 *iter = &upper->list;
4995 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
4998 * netdev_lower_get_next_private - Get the next ->private from the
4999 * lower neighbour list
5001 * @iter: list_head ** of the current position
5003 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5004 * list, starting from iter position. The caller must either hold the
5005 * RTNL lock or its own locking that guarantees that the neighbour lower
5006 * list will remain unchanged.
5008 void *netdev_lower_get_next_private(struct net_device *dev,
5009 struct list_head **iter)
5011 struct netdev_adjacent *lower;
5013 lower = list_entry(*iter, struct netdev_adjacent, list);
5015 if (&lower->list == &dev->adj_list.lower)
5018 *iter = lower->list.next;
5020 return lower->private;
5022 EXPORT_SYMBOL(netdev_lower_get_next_private);
5025 * netdev_lower_get_next_private_rcu - Get the next ->private from the
5026 * lower neighbour list, RCU
5029 * @iter: list_head ** of the current position
5031 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5032 * list, starting from iter position. The caller must hold RCU read lock.
5034 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5035 struct list_head **iter)
5037 struct netdev_adjacent *lower;
5039 WARN_ON_ONCE(!rcu_read_lock_held());
5041 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5043 if (&lower->list == &dev->adj_list.lower)
5046 *iter = &lower->list;
5048 return lower->private;
5050 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5053 * netdev_lower_get_next - Get the next device from the lower neighbour
5056 * @iter: list_head ** of the current position
5058 * Gets the next netdev_adjacent from the dev's lower neighbour
5059 * list, starting from iter position. The caller must hold the RTNL lock or
5060 * its own locking that guarantees that the neighbour lower
5061 * list will remain unchanged.
5063 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5065 struct netdev_adjacent *lower;
5067 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
5069 if (&lower->list == &dev->adj_list.lower)
5072 *iter = &lower->list;
5076 EXPORT_SYMBOL(netdev_lower_get_next);
5079 * netdev_lower_get_first_private_rcu - Get the first ->private from the
5080 * lower neighbour list, RCU
5084 * Gets the first netdev_adjacent->private from the dev's lower neighbour
5085 * list. The caller must hold RCU read lock.
5087 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
5089 struct netdev_adjacent *lower;
5091 lower = list_first_or_null_rcu(&dev->adj_list.lower,
5092 struct netdev_adjacent, list);
5094 return lower->private;
5097 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
5100 * netdev_master_upper_dev_get_rcu - Get master upper device
5103 * Find a master upper device and return pointer to it or NULL in case
5104 * it's not there. The caller must hold the RCU read lock.
5106 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
5108 struct netdev_adjacent *upper;
5110 upper = list_first_or_null_rcu(&dev->adj_list.upper,
5111 struct netdev_adjacent, list);
5112 if (upper && likely(upper->master))
5116 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
5118 static int netdev_adjacent_sysfs_add(struct net_device *dev,
5119 struct net_device *adj_dev,
5120 struct list_head *dev_list)
5122 char linkname[IFNAMSIZ+7];
5123 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5124 "upper_%s" : "lower_%s", adj_dev->name);
5125 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
5128 static void netdev_adjacent_sysfs_del(struct net_device *dev,
5130 struct list_head *dev_list)
5132 char linkname[IFNAMSIZ+7];
5133 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5134 "upper_%s" : "lower_%s", name);
5135 sysfs_remove_link(&(dev->dev.kobj), linkname);
5138 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5139 struct net_device *adj_dev,
5140 struct list_head *dev_list)
5142 return (dev_list == &dev->adj_list.upper ||
5143 dev_list == &dev->adj_list.lower) &&
5144 net_eq(dev_net(dev), dev_net(adj_dev));
5147 static int __netdev_adjacent_dev_insert(struct net_device *dev,
5148 struct net_device *adj_dev,
5149 struct list_head *dev_list,
5150 void *private, bool master)
5152 struct netdev_adjacent *adj;
5155 adj = __netdev_find_adj(dev, adj_dev, dev_list);
5162 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5167 adj->master = master;
5169 adj->private = private;
5172 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5173 adj_dev->name, dev->name, adj_dev->name);
5175 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
5176 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5181 /* Ensure that master link is always the first item in list. */
5183 ret = sysfs_create_link(&(dev->dev.kobj),
5184 &(adj_dev->dev.kobj), "master");
5186 goto remove_symlinks;
5188 list_add_rcu(&adj->list, dev_list);
5190 list_add_tail_rcu(&adj->list, dev_list);
5196 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5197 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5205 static void __netdev_adjacent_dev_remove(struct net_device *dev,
5206 struct net_device *adj_dev,
5207 struct list_head *dev_list)
5209 struct netdev_adjacent *adj;
5211 adj = __netdev_find_adj(dev, adj_dev, dev_list);
5214 pr_err("tried to remove device %s from %s\n",
5215 dev->name, adj_dev->name);
5219 if (adj->ref_nr > 1) {
5220 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5227 sysfs_remove_link(&(dev->dev.kobj), "master");
5229 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5230 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5232 list_del_rcu(&adj->list);
5233 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5234 adj_dev->name, dev->name, adj_dev->name);
5236 kfree_rcu(adj, rcu);
5239 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5240 struct net_device *upper_dev,
5241 struct list_head *up_list,
5242 struct list_head *down_list,
5243 void *private, bool master)
5247 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5252 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5255 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5262 static int __netdev_adjacent_dev_link(struct net_device *dev,
5263 struct net_device *upper_dev)
5265 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5266 &dev->all_adj_list.upper,
5267 &upper_dev->all_adj_list.lower,
5271 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5272 struct net_device *upper_dev,
5273 struct list_head *up_list,
5274 struct list_head *down_list)
5276 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5277 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5280 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5281 struct net_device *upper_dev)
5283 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5284 &dev->all_adj_list.upper,
5285 &upper_dev->all_adj_list.lower);
5288 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5289 struct net_device *upper_dev,
5290 void *private, bool master)
5292 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5297 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5298 &dev->adj_list.upper,
5299 &upper_dev->adj_list.lower,
5302 __netdev_adjacent_dev_unlink(dev, upper_dev);
5309 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5310 struct net_device *upper_dev)
5312 __netdev_adjacent_dev_unlink(dev, upper_dev);
5313 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5314 &dev->adj_list.upper,
5315 &upper_dev->adj_list.lower);
5318 static int __netdev_upper_dev_link(struct net_device *dev,
5319 struct net_device *upper_dev, bool master,
5322 struct netdev_adjacent *i, *j, *to_i, *to_j;
5327 if (dev == upper_dev)
5330 /* To prevent loops, check if dev is not upper device to upper_dev. */
5331 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
5334 if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
5337 if (master && netdev_master_upper_dev_get(dev))
5340 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5345 /* Now that we linked these devs, make all the upper_dev's
5346 * all_adj_list.upper visible to every dev's all_adj_list.lower and
5347 * vice versa, and don't forget the devices themselves. All of these
5348 * links are non-neighbours.
5350 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5351 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5352 pr_debug("Interlinking %s with %s, non-neighbour\n",
5353 i->dev->name, j->dev->name);
5354 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5360 /* add dev to every upper_dev's upper device */
5361 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5362 pr_debug("linking %s's upper device %s with %s\n",
5363 upper_dev->name, i->dev->name, dev->name);
5364 ret = __netdev_adjacent_dev_link(dev, i->dev);
5366 goto rollback_upper_mesh;
5369 /* add upper_dev to every dev's lower device */
5370 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5371 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5372 i->dev->name, upper_dev->name);
5373 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5375 goto rollback_lower_mesh;
5378 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5381 rollback_lower_mesh:
5383 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5386 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5391 rollback_upper_mesh:
5393 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5396 __netdev_adjacent_dev_unlink(dev, i->dev);
5404 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5405 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5406 if (i == to_i && j == to_j)
5408 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5414 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5420 * netdev_upper_dev_link - Add a link to the upper device
5422 * @upper_dev: new upper device
5424 * Adds a link to a device which is upper to this one. The caller must hold
5425 * the RTNL lock. On a failure a negative errno code is returned.
5426 * On success the reference counts are adjusted and the function returns zero.
5429 int netdev_upper_dev_link(struct net_device *dev,
5430 struct net_device *upper_dev)
5432 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
5434 EXPORT_SYMBOL(netdev_upper_dev_link);
5437 * netdev_master_upper_dev_link - Add a master link to the upper device
5439 * @upper_dev: new upper device
5441 * Adds a link to a device which is upper to this one. In this case, only
5442 * one master upper device can be linked, although other non-master devices
5443 * might be linked as well. The caller must hold the RTNL lock.
5444 * On a failure a negative errno code is returned. On success the reference
5445 * counts are adjusted and the function returns zero.
5447 int netdev_master_upper_dev_link(struct net_device *dev,
5448 struct net_device *upper_dev)
5450 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
5452 EXPORT_SYMBOL(netdev_master_upper_dev_link);
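/* Illustrative sketch, not part of the original file: how a bonding-style
 * driver might attach and detach a slave with the helpers above.
 * example_enslave/example_release are hypothetical; the caller holds RTNL.
 */
static int example_enslave(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();

	return netdev_master_upper_dev_link(slave, master);
}

static void example_release(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();

	netdev_upper_dev_unlink(slave, master);
}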
5454 int netdev_master_upper_dev_link_private(struct net_device *dev,
5455 struct net_device *upper_dev,
5458 return __netdev_upper_dev_link(dev, upper_dev, true, private);
5460 EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5463 * netdev_upper_dev_unlink - Removes a link to upper device
5465 * @upper_dev: upper device to unlink
5467 * Removes a link to a device which is upper to this one. The caller must hold the RTNL lock.
5470 void netdev_upper_dev_unlink(struct net_device *dev,
5471 struct net_device *upper_dev)
5473 struct netdev_adjacent *i, *j;
5476 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5478 /* Here is the tricky part. We must remove all dev's lower
5479 * devices from all upper_dev's upper devices and vice
5480 * versa, to maintain the graph relationship.
5482 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5483 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5484 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5486 /* also remove the devices themselves from the lower/upper device
5489 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5490 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5492 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5493 __netdev_adjacent_dev_unlink(dev, i->dev);
5495 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5497 EXPORT_SYMBOL(netdev_upper_dev_unlink);
5500 * netdev_bonding_info_change - Dispatch event about slave change
5502 * @bonding_info: info to dispatch
5504 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5505 * The caller must hold the RTNL lock.
5507 void netdev_bonding_info_change(struct net_device *dev,
5508 struct netdev_bonding_info *bonding_info)
5510 struct netdev_notifier_bonding_info info;
5512 memcpy(&info.bonding_info, bonding_info,
5513 sizeof(struct netdev_bonding_info));
5514 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
5517 EXPORT_SYMBOL(netdev_bonding_info_change);
5519 static void netdev_adjacent_add_links(struct net_device *dev)
5521 struct netdev_adjacent *iter;
5523 struct net *net = dev_net(dev);
5525 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5526 if (!net_eq(net,dev_net(iter->dev)))
5528 netdev_adjacent_sysfs_add(iter->dev, dev,
5529 &iter->dev->adj_list.lower);
5530 netdev_adjacent_sysfs_add(dev, iter->dev,
5531 &dev->adj_list.upper);
5534 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5535 if (!net_eq(net,dev_net(iter->dev)))
5537 netdev_adjacent_sysfs_add(iter->dev, dev,
5538 &iter->dev->adj_list.upper);
5539 netdev_adjacent_sysfs_add(dev, iter->dev,
5540 &dev->adj_list.lower);
5544 static void netdev_adjacent_del_links(struct net_device *dev)
5546 struct netdev_adjacent *iter;
5548 struct net *net = dev_net(dev);
5550 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5551 if (!net_eq(net,dev_net(iter->dev)))
5553 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5554 &iter->dev->adj_list.lower);
5555 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5556 &dev->adj_list.upper);
5559 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5560 if (!net_eq(net,dev_net(iter->dev)))
5562 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5563 &iter->dev->adj_list.upper);
5564 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5565 &dev->adj_list.lower);
5569 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5571 struct netdev_adjacent *iter;
5573 struct net *net = dev_net(dev);
5575 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5576 if (!net_eq(net,dev_net(iter->dev)))
5578 netdev_adjacent_sysfs_del(iter->dev, oldname,
5579 &iter->dev->adj_list.lower);
5580 netdev_adjacent_sysfs_add(iter->dev, dev,
5581 &iter->dev->adj_list.lower);
5584 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5585 if (!net_eq(net,dev_net(iter->dev)))
5587 netdev_adjacent_sysfs_del(iter->dev, oldname,
5588 &iter->dev->adj_list.upper);
5589 netdev_adjacent_sysfs_add(iter->dev, dev,
5590 &iter->dev->adj_list.upper);
5594 void *netdev_lower_dev_get_private(struct net_device *dev,
5595 struct net_device *lower_dev)
5597 struct netdev_adjacent *lower;
5601 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5605 return lower->private;
5607 EXPORT_SYMBOL(netdev_lower_dev_get_private);
5610 int dev_get_nest_level(struct net_device *dev,
5611 bool (*type_check)(struct net_device *dev))
5613 struct net_device *lower = NULL;
5614 struct list_head *iter;
5620 netdev_for_each_lower_dev(dev, lower, iter) {
5621 nest = dev_get_nest_level(lower, type_check);
5622 if (max_nest < nest)
5626 if (type_check(dev))
5631 EXPORT_SYMBOL(dev_get_nest_level);
5633 static void dev_change_rx_flags(struct net_device *dev, int flags)
5635 const struct net_device_ops *ops = dev->netdev_ops;
5637 if (ops->ndo_change_rx_flags)
5638 ops->ndo_change_rx_flags(dev, flags);
5641 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
5643 unsigned int old_flags = dev->flags;
5649 dev->flags |= IFF_PROMISC;
5650 dev->promiscuity += inc;
5651 if (dev->promiscuity == 0) {
5654 * If inc causes overflow, untouch promisc and return error.
5657 dev->flags &= ~IFF_PROMISC;
5659 dev->promiscuity -= inc;
5660 pr_warn("%s: promiscuity counter overflowed, set promiscuity failed. The promiscuity feature of this device might be broken.\n",
5665 if (dev->flags != old_flags) {
5666 pr_info("device %s %s promiscuous mode\n",
5668 dev->flags & IFF_PROMISC ? "entered" : "left");
5669 if (audit_enabled) {
5670 current_uid_gid(&uid, &gid);
5671 audit_log(current->audit_context, GFP_ATOMIC,
5672 AUDIT_ANOM_PROMISCUOUS,
5673 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5674 dev->name, (dev->flags & IFF_PROMISC),
5675 (old_flags & IFF_PROMISC),
5676 from_kuid(&init_user_ns, audit_get_loginuid(current)),
5677 from_kuid(&init_user_ns, uid),
5678 from_kgid(&init_user_ns, gid),
5679 audit_get_sessionid(current));
5682 dev_change_rx_flags(dev, IFF_PROMISC);
5685 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
5690 * dev_set_promiscuity - update promiscuity count on a device
5694 * Add or remove promiscuity from a device. While the count in the device
5695 * remains above zero the interface remains promiscuous. Once it hits zero
5696 * the device reverts back to normal filtering operation. A negative inc
5697 * value is used to drop promiscuity on the device.
5698 * Return 0 if successful or a negative errno code on error.
5700 int dev_set_promiscuity(struct net_device *dev, int inc)
5702 unsigned int old_flags = dev->flags;
5705 err = __dev_set_promiscuity(dev, inc, true);
5708 if (dev->flags != old_flags)
5709 dev_set_rx_mode(dev);
5712 EXPORT_SYMBOL(dev_set_promiscuity);
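/* Illustrative sketch, not part of the original file: a packet-capture style
 * user bumping the promiscuity count for the duration of a capture and
 * dropping it afterwards. example_capture_start/stop are hypothetical.
 */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);
	rtnl_unlock();

	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);
	rtnl_unlock();
}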
5714 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
5716 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
5720 dev->flags |= IFF_ALLMULTI;
5721 dev->allmulti += inc;
5722 if (dev->allmulti == 0) {
5725 * If inc causes overflow, untouch allmulti and return error.
5728 dev->flags &= ~IFF_ALLMULTI;
5730 dev->allmulti -= inc;
5731 pr_warn("%s: allmulti counter overflowed, set allmulti failed. The allmulti feature of this device might be broken.\n",
5736 if (dev->flags ^ old_flags) {
5737 dev_change_rx_flags(dev, IFF_ALLMULTI);
5738 dev_set_rx_mode(dev);
5740 __dev_notify_flags(dev, old_flags,
5741 dev->gflags ^ old_gflags);
5747 * dev_set_allmulti - update allmulti count on a device
5751 * Add or remove reception of all multicast frames to a device. While the
5752 * count in the device remains above zero the interface remains listening
5753 * to all multicast frames. Once it hits zero the device reverts back to normal
5754 * filtering operation. A negative @inc value is used to drop the counter
5755 * when releasing a resource needing all multicasts.
5756 * Return 0 if successful or a negative errno code on error.
5759 int dev_set_allmulti(struct net_device *dev, int inc)
5761 return __dev_set_allmulti(dev, inc, true);
5763 EXPORT_SYMBOL(dev_set_allmulti);
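/* Illustrative sketch, not part of the original file: taking and releasing an
 * allmulti reference around a phase where every multicast frame is needed.
 * example_need_allmulti is a hypothetical helper.
 */
static int example_need_allmulti(struct net_device *dev, bool on)
{
	int err;

	rtnl_lock();
	err = dev_set_allmulti(dev, on ? 1 : -1);
	rtnl_unlock();

	return err;
}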
5766 * Upload unicast and multicast address lists to device and
5767 * configure RX filtering. When the device doesn't support unicast
5768 * filtering it is put in promiscuous mode while unicast addresses are added.
5771 void __dev_set_rx_mode(struct net_device *dev)
5773 const struct net_device_ops *ops = dev->netdev_ops;
5775 /* dev_open will call this function so the list will stay sane. */
5776 if (!(dev->flags&IFF_UP))
5779 if (!netif_device_present(dev))
5782 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
5783 /* Unicast address changes may only happen under the rtnl,
5784 * therefore calling __dev_set_promiscuity here is safe.
5786 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
5787 __dev_set_promiscuity(dev, 1, false);
5788 dev->uc_promisc = true;
5789 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
5790 __dev_set_promiscuity(dev, -1, false);
5791 dev->uc_promisc = false;
5795 if (ops->ndo_set_rx_mode)
5796 ops->ndo_set_rx_mode(dev);
5799 void dev_set_rx_mode(struct net_device *dev)
5801 netif_addr_lock_bh(dev);
5802 __dev_set_rx_mode(dev);
5803 netif_addr_unlock_bh(dev);
5807 * dev_get_flags - get flags reported to userspace
5810 * Get the combination of flag bits exported through APIs to userspace.
5812 unsigned int dev_get_flags(const struct net_device *dev)
5816 flags = (dev->flags & ~(IFF_PROMISC |
5821 (dev->gflags & (IFF_PROMISC |
5824 if (netif_running(dev)) {
5825 if (netif_oper_up(dev))
5826 flags |= IFF_RUNNING;
5827 if (netif_carrier_ok(dev))
5828 flags |= IFF_LOWER_UP;
5829 if (netif_dormant(dev))
5830 flags |= IFF_DORMANT;
5835 EXPORT_SYMBOL(dev_get_flags);
5837 int __dev_change_flags(struct net_device *dev, unsigned int flags)
5839 unsigned int old_flags = dev->flags;
5845 * Set the flags on our device.
5848 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5849 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5851 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5855 * Load in the correct multicast list now the flags have changed.
5858 if ((old_flags ^ flags) & IFF_MULTICAST)
5859 dev_change_rx_flags(dev, IFF_MULTICAST);
5861 dev_set_rx_mode(dev);
5864 * Have we downed the interface? We handle IFF_UP ourselves
5865 * according to user attempts to set it, rather than blindly setting it.
5870 if ((old_flags ^ flags) & IFF_UP)
5871 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
5873 if ((flags ^ dev->gflags) & IFF_PROMISC) {
5874 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5875 unsigned int old_flags = dev->flags;
5877 dev->gflags ^= IFF_PROMISC;
5879 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5880 if (dev->flags != old_flags)
5881 dev_set_rx_mode(dev);
5884 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5885 is important. Some (broken) drivers set IFF_PROMISC when
5886 IFF_ALLMULTI is requested, without asking us and without reporting it.
5888 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
5889 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5891 dev->gflags ^= IFF_ALLMULTI;
5892 __dev_set_allmulti(dev, inc, false);
5898 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5899 unsigned int gchanges)
5901 unsigned int changes = dev->flags ^ old_flags;
5904 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
5906 if (changes & IFF_UP) {
5907 if (dev->flags & IFF_UP)
5908 call_netdevice_notifiers(NETDEV_UP, dev);
5910 call_netdevice_notifiers(NETDEV_DOWN, dev);
5913 if (dev->flags & IFF_UP &&
5914 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5915 struct netdev_notifier_change_info change_info;
5917 change_info.flags_changed = changes;
5918 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5924 * dev_change_flags - change device settings
5926 * @flags: device state flags
5928 * Change settings on a device based on the given state flags. The flags are
5929 * in the userspace-exported format.
5931 int dev_change_flags(struct net_device *dev, unsigned int flags)
5934 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
5936 ret = __dev_change_flags(dev, flags);
5940 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
5941 __dev_notify_flags(dev, old_flags, changes);
5944 EXPORT_SYMBOL(dev_change_flags);
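/* Illustrative sketch, not part of the original file: bringing an interface
 * administratively up the way an "ip link set dev ... up" request does, by
 * setting IFF_UP through dev_change_flags(). example_set_up is hypothetical.
 */
static int example_set_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();

	return err;
}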
5946 static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5948 const struct net_device_ops *ops = dev->netdev_ops;
5950 if (ops->ndo_change_mtu)
5951 return ops->ndo_change_mtu(dev, new_mtu);
5958 * dev_set_mtu - Change maximum transfer unit
5960 * @new_mtu: new transfer unit
5962 * Change the maximum transfer size of the network device.
5964 int dev_set_mtu(struct net_device *dev, int new_mtu)
5968 if (new_mtu == dev->mtu)
5971 /* MTU must be positive. */
5975 if (!netif_device_present(dev))
5978 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5979 err = notifier_to_errno(err);
5983 orig_mtu = dev->mtu;
5984 err = __dev_set_mtu(dev, new_mtu);
5987 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5988 err = notifier_to_errno(err);
5990 /* setting mtu back and notifying everyone again,
5991 * so that they have a chance to revert changes.
5993 __dev_set_mtu(dev, orig_mtu);
5994 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5999 EXPORT_SYMBOL(dev_set_mtu);
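/* Illustrative sketch, not part of the original file: a tunnel-style caller
 * reducing the MTU of a device by its encapsulation overhead.
 * example_set_tunnel_mtu and the overhead parameter are hypothetical; note
 * that a vetoing NETDEV_CHANGEMTU listener makes dev_set_mtu() restore the
 * old value before the error is returned.
 */
static int example_set_tunnel_mtu(struct net_device *dev, int overhead)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, dev->mtu - overhead);
	rtnl_unlock();

	return err;
}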
6002 * dev_set_group - Change group this device belongs to
6004 * @new_group: group this device should belong to
6006 void dev_set_group(struct net_device *dev, int new_group)
6008 dev->group = new_group;
6010 EXPORT_SYMBOL(dev_set_group);
6013 * dev_set_mac_address - Change Media Access Control Address
6017 * Change the hardware (MAC) address of the device
6019 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6021 const struct net_device_ops *ops = dev->netdev_ops;
6024 if (!ops->ndo_set_mac_address)
6026 if (sa->sa_family != dev->type)
6028 if (!netif_device_present(dev))
6030 err = ops->ndo_set_mac_address(dev, sa);
6033 dev->addr_assign_type = NET_ADDR_SET;
6034 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
6035 add_device_randomness(dev->dev_addr, dev->addr_len);
6038 EXPORT_SYMBOL(dev_set_mac_address);
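/* Illustrative sketch, not part of the original file: programming a new
 * hardware address from a raw byte buffer. example_set_hwaddr is
 * hypothetical; sa_family must match dev->type, as checked above, and the
 * sketch assumes dev->addr_len fits in sa.sa_data (true for Ethernet).
 */
static int example_set_hwaddr(struct net_device *dev, const u8 *addr)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, addr, dev->addr_len);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();

	return err;
}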
6041 * dev_change_carrier - Change device carrier
6043 * @new_carrier: new value
6045 * Change device carrier
6047 int dev_change_carrier(struct net_device *dev, bool new_carrier)
6049 const struct net_device_ops *ops = dev->netdev_ops;
6051 if (!ops->ndo_change_carrier)
6053 if (!netif_device_present(dev))
6055 return ops->ndo_change_carrier(dev, new_carrier);
6057 EXPORT_SYMBOL(dev_change_carrier);
6060 * dev_get_phys_port_id - Get device physical port ID
6064 * Get device physical port ID
6066 int dev_get_phys_port_id(struct net_device *dev,
6067 struct netdev_phys_item_id *ppid)
6069 const struct net_device_ops *ops = dev->netdev_ops;
6071 if (!ops->ndo_get_phys_port_id)
6073 return ops->ndo_get_phys_port_id(dev, ppid);
6075 EXPORT_SYMBOL(dev_get_phys_port_id);
6078 * dev_get_phys_port_name - Get device physical port name
6082 * Get device physical port name
6084 int dev_get_phys_port_name(struct net_device *dev,
6085 char *name, size_t len)
6087 const struct net_device_ops *ops = dev->netdev_ops;
6089 if (!ops->ndo_get_phys_port_name)
6091 return ops->ndo_get_phys_port_name(dev, name, len);
6093 EXPORT_SYMBOL(dev_get_phys_port_name);
6096 * dev_new_index - allocate an ifindex
6097 * @net: the applicable net namespace
6099 * Returns a suitable unique value for a new device interface
6100 * number. The caller must hold the rtnl semaphore or the
6101 * dev_base_lock to be sure it remains unique.
6103 static int dev_new_index(struct net *net)
6105 int ifindex = net->ifindex;
6109 if (!__dev_get_by_index(net, ifindex))
6110 return net->ifindex = ifindex;
6114 /* Delayed registration/unregisteration */
6115 static LIST_HEAD(net_todo_list);
6116 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
6118 static void net_set_todo(struct net_device *dev)
6120 list_add_tail(&dev->todo_list, &net_todo_list);
6121 dev_net(dev)->dev_unreg_count++;
6124 static void rollback_registered_many(struct list_head *head)
6126 struct net_device *dev, *tmp;
6127 LIST_HEAD(close_head);
6129 BUG_ON(dev_boot_phase);
6132 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
6133 /* Some devices call this without having been registered,
6134 * as part of initialization unwind. Remove those
6135 * devices and proceed with the remaining.
6137 if (dev->reg_state == NETREG_UNINITIALIZED) {
6138 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6142 list_del(&dev->unreg_list);
6145 dev->dismantle = true;
6146 BUG_ON(dev->reg_state != NETREG_REGISTERED);
6149 /* If device is running, close it first. */
6150 list_for_each_entry(dev, head, unreg_list)
6151 list_add_tail(&dev->close_list, &close_head);
6152 dev_close_many(&close_head, true);
6154 list_for_each_entry(dev, head, unreg_list) {
6155 /* And unlink it from device chain. */
6156 unlist_netdevice(dev);
6158 dev->reg_state = NETREG_UNREGISTERING;
6163 list_for_each_entry(dev, head, unreg_list) {
6164 struct sk_buff *skb = NULL;
6166 /* Shutdown queueing discipline. */
6170 /* Notify protocols that we are about to destroy
6171 this device. They should clean up all their state.
6173 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6175 if (!dev->rtnl_link_ops ||
6176 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6177 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6181 * Flush the unicast and multicast chains
6186 if (dev->netdev_ops->ndo_uninit)
6187 dev->netdev_ops->ndo_uninit(dev);
6190 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
6192 /* Notifier chain MUST detach us from all upper devices. */
6193 WARN_ON(netdev_has_any_upper_dev(dev));
6195 /* Remove entries from kobject tree */
6196 netdev_unregister_kobject(dev);
6198 /* Remove XPS queueing entries */
6199 netif_reset_xps_queues_gt(dev, 0);
6205 list_for_each_entry(dev, head, unreg_list)
6209 static void rollback_registered(struct net_device *dev)
6213 list_add(&dev->unreg_list, &single);
6214 rollback_registered_many(&single);
6218 static netdev_features_t netdev_fix_features(struct net_device *dev,
6219 netdev_features_t features)
6221 /* Fix illegal checksum combinations */
6222 if ((features & NETIF_F_HW_CSUM) &&
6223 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6224 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
6225 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6228 /* TSO requires that SG is present as well. */
6229 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6230 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
6231 features &= ~NETIF_F_ALL_TSO;
6234 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6235 !(features & NETIF_F_IP_CSUM)) {
6236 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6237 features &= ~NETIF_F_TSO;
6238 features &= ~NETIF_F_TSO_ECN;
6241 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6242 !(features & NETIF_F_IPV6_CSUM)) {
6243 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6244 features &= ~NETIF_F_TSO6;
6247 /* TSO ECN requires that TSO is present as well. */
6248 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6249 features &= ~NETIF_F_TSO_ECN;
6251 /* Software GSO depends on SG. */
6252 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6253 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
6254 features &= ~NETIF_F_GSO;
6257 /* UFO needs SG and checksumming */
6258 if (features & NETIF_F_UFO) {
6259 /* maybe split UFO into V4 and V6? */
6260 if (!((features & NETIF_F_GEN_CSUM) ||
6261 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
6262 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6264 "Dropping NETIF_F_UFO since no checksum offload features.\n");
6265 features &= ~NETIF_F_UFO;
6268 if (!(features & NETIF_F_SG)) {
6270 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
6271 features &= ~NETIF_F_UFO;
6275 #ifdef CONFIG_NET_RX_BUSY_POLL
6276 if (dev->netdev_ops->ndo_busy_poll)
6277 features |= NETIF_F_BUSY_POLL;
6280 features &= ~NETIF_F_BUSY_POLL;
6285 int __netdev_update_features(struct net_device *dev)
6287 netdev_features_t features;
6292 features = netdev_get_wanted_features(dev);
6294 if (dev->netdev_ops->ndo_fix_features)
6295 features = dev->netdev_ops->ndo_fix_features(dev, features);
6297 /* driver might be less strict about feature dependencies */
6298 features = netdev_fix_features(dev, features);
6300 if (dev->features == features)
6303 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6304 &dev->features, &features);
6306 if (dev->netdev_ops->ndo_set_features)
6307 err = dev->netdev_ops->ndo_set_features(dev, features);
6309 if (unlikely(err < 0)) {
6311 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6312 err, &features, &dev->features);
6317 dev->features = features;
6323 * netdev_update_features - recalculate device features
6324 * @dev: the device to check
6326 * Recalculate dev->features set and send notifications if it
6327 * has changed. Should be called after driver- or hardware-dependent
6328 * conditions that influence the features might have changed.
6330 void netdev_update_features(struct net_device *dev)
6332 if (__netdev_update_features(dev))
6333 netdev_features_change(dev);
6335 EXPORT_SYMBOL(netdev_update_features);
6338 * netdev_change_features - recalculate device features
6339 * @dev: the device to check
6341 * Recalculate dev->features set and send notifications even
6342 * if they have not changed. Should be called instead of
6343 * netdev_update_features() if dev->vlan_features might also
6344 * have changed, to allow the changes to be propagated to stacked VLAN devices.
6347 void netdev_change_features(struct net_device *dev)
6349 __netdev_update_features(dev);
6350 netdev_features_change(dev);
6352 EXPORT_SYMBOL(netdev_change_features);
6355 * netif_stacked_transfer_operstate - transfer operstate
6356 * @rootdev: the root or lower level device to transfer state from
6357 * @dev: the device to transfer operstate to
6359 * Transfer operational state from root to device. This is normally
6360 * called when a stacking relationship exists between the root
6361 * device and the device (a leaf device).
6363 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6364 struct net_device *dev)
6366 if (rootdev->operstate == IF_OPER_DORMANT)
6367 netif_dormant_on(dev);
6369 netif_dormant_off(dev);
6371 if (netif_carrier_ok(rootdev)) {
6372 if (!netif_carrier_ok(dev))
6373 netif_carrier_on(dev);
6375 if (netif_carrier_ok(dev))
6376 netif_carrier_off(dev);
6379 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6382 static int netif_alloc_rx_queues(struct net_device *dev)
6384 unsigned int i, count = dev->num_rx_queues;
6385 struct netdev_rx_queue *rx;
6386 size_t sz = count * sizeof(*rx);
6390 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6398 for (i = 0; i < count; i++)
6404 static void netdev_init_one_queue(struct net_device *dev,
6405 struct netdev_queue *queue, void *_unused)
6407 /* Initialize queue lock */
6408 spin_lock_init(&queue->_xmit_lock);
6409 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6410 queue->xmit_lock_owner = -1;
6411 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
6414 dql_init(&queue->dql, HZ);
6418 static void netif_free_tx_queues(struct net_device *dev)
6423 static int netif_alloc_netdev_queues(struct net_device *dev)
6425 unsigned int count = dev->num_tx_queues;
6426 struct netdev_queue *tx;
6427 size_t sz = count * sizeof(*tx);
6429 BUG_ON(count < 1 || count > 0xffff);
6431 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6439 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6440 spin_lock_init(&dev->tx_global_lock);
6445 void netif_tx_stop_all_queues(struct net_device *dev)
6449 for (i = 0; i < dev->num_tx_queues; i++) {
6450 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
6451 netif_tx_stop_queue(txq);
6454 EXPORT_SYMBOL(netif_tx_stop_all_queues);
6457 * register_netdevice - register a network device
6458 * @dev: device to register
6460 * Take a completed network device structure and add it to the kernel
6461 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6462 * chain. 0 is returned on success. A negative errno code is returned
6463 * on a failure to set up the device, or if the name is a duplicate.
6465 * Callers must hold the rtnl semaphore. You may want
6466 * register_netdev() instead of this.
6469 * The locking appears insufficient to guarantee two parallel registers
6470 * will not get the same name.
6473 int register_netdevice(struct net_device *dev)
6476 struct net *net = dev_net(dev);
6478 BUG_ON(dev_boot_phase);
6483 /* When net_device's are persistent, this will be fatal. */
6484 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
6487 spin_lock_init(&dev->addr_list_lock);
6488 netdev_set_addr_lockdep_class(dev);
6490 ret = dev_get_valid_name(net, dev, dev->name);
6494 /* Init, if this function is available */
6495 if (dev->netdev_ops->ndo_init) {
6496 ret = dev->netdev_ops->ndo_init(dev);
6504 if (((dev->hw_features | dev->features) &
6505 NETIF_F_HW_VLAN_CTAG_FILTER) &&
6506 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6507 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6508 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6515 dev->ifindex = dev_new_index(net);
6516 else if (__dev_get_by_index(net, dev->ifindex))
6519 /* Transfer changeable features to wanted_features and enable
6520 * software offloads (GSO and GRO).
6522 dev->hw_features |= NETIF_F_SOFT_FEATURES;
6523 dev->features |= NETIF_F_SOFT_FEATURES;
6524 dev->wanted_features = dev->features & dev->hw_features;
6526 if (!(dev->flags & IFF_LOOPBACK)) {
6527 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6530 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6532 dev->vlan_features |= NETIF_F_HIGHDMA;
6534 /* Make NETIF_F_SG inheritable to tunnel devices.
6536 dev->hw_enc_features |= NETIF_F_SG;
6538 /* Make NETIF_F_SG inheritable to MPLS.
6540 dev->mpls_features |= NETIF_F_SG;
6542 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6543 ret = notifier_to_errno(ret);
6547 ret = netdev_register_kobject(dev);
6550 dev->reg_state = NETREG_REGISTERED;
6552 __netdev_update_features(dev);
6555 * Default initial state at registration is that the
6556 * device is present.
6559 set_bit(__LINK_STATE_PRESENT, &dev->state);
6561 linkwatch_init_dev(dev);
6563 dev_init_scheduler(dev);
6565 list_netdevice(dev);
6566 add_device_randomness(dev->dev_addr, dev->addr_len);
6568 /* If the device has a permanent device address, the driver should
6569 * set dev_addr, and addr_assign_type should be set to
6570 * NET_ADDR_PERM (default value).
6572 if (dev->addr_assign_type == NET_ADDR_PERM)
6573 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6575 /* Notify protocols, that a new device appeared. */
6576 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
6577 ret = notifier_to_errno(ret);
6579 rollback_registered(dev);
6580 dev->reg_state = NETREG_UNREGISTERED;
6583 * Prevent userspace races by waiting until the network
6584 * device is fully set up before sending notifications.
6586 if (!dev->rtnl_link_ops ||
6587 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6588 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6594 if (dev->netdev_ops->ndo_uninit)
6595 dev->netdev_ops->ndo_uninit(dev);
6598 EXPORT_SYMBOL(register_netdevice);
6601 * init_dummy_netdev - init a dummy network device for NAPI
6602 * @dev: device to init
6604 * This takes a network device structure and initializes the minimum
6605 * number of fields so it can be used to schedule NAPI polls without
6606 * registering a full-blown interface. This is to be used by drivers
6607 * that need to tie several hardware interfaces to a single NAPI
6608 * poll scheduler due to HW limitations.
6610 int init_dummy_netdev(struct net_device *dev)
6612 /* Clear everything. Note we don't initialize spinlocks
6613 * as they aren't supposed to be taken by any of the
6614 * NAPI code and this dummy netdev is supposed to be
6615 * only ever used for NAPI polls
6617 memset(dev, 0, sizeof(struct net_device));
6619 /* make sure we BUG if trying to hit standard
6620 * register/unregister code path
6622 dev->reg_state = NETREG_DUMMY;
6624 /* NAPI wants this */
6625 INIT_LIST_HEAD(&dev->napi_list);
6627 /* a dummy interface is started by default */
6628 set_bit(__LINK_STATE_PRESENT, &dev->state);
6629 set_bit(__LINK_STATE_START, &dev->state);
6631 /* Note: We don't allocate pcpu_refcnt for dummy devices,
6632 * because users of this 'device' don't need to change its refcount.
6638 EXPORT_SYMBOL_GPL(init_dummy_netdev);
6642 * register_netdev - register a network device
6643 * @dev: device to register
6645 * Take a completed network device structure and add it to the kernel
6646 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6647 * chain. 0 is returned on success. A negative errno code is returned
6648 * on a failure to set up the device, or if the name is a duplicate.
6650 * This is a wrapper around register_netdevice that takes the rtnl semaphore
6651 * and expands the device name if you passed a format string to alloc_netdev.
6654 int register_netdev(struct net_device *dev)
6659 err = register_netdevice(dev);
6663 EXPORT_SYMBOL(register_netdev);
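/* Illustrative sketch, not part of the original file: the usual probe-time
 * sequence for an Ethernet driver: allocate, set the ops, register, and free
 * on failure. alloc_etherdev() is the standard helper from etherdevice.h;
 * example_probe and example_netdev_ops are hypothetical.
 */
static const struct net_device_ops example_netdev_ops; /* hypothetical, empty */

static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(0);	/* no private area in this sketch */
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &example_netdev_ops;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);

	return err;
}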
6665 int netdev_refcnt_read(const struct net_device *dev)
6669 for_each_possible_cpu(i)
6670 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6673 EXPORT_SYMBOL(netdev_refcnt_read);
6676 * netdev_wait_allrefs - wait until all references are gone.
6677 * @dev: target net_device
6679 * This is called when unregistering network devices.
6681 * Any protocol or device that holds a reference should register
6682 * for netdevice notification, and cleanup and put back the
6683 * reference if they receive an UNREGISTER event.
6684 * We can get stuck here if buggy protocols don't correctly call dev_put.
6687 static void netdev_wait_allrefs(struct net_device *dev)
6689 unsigned long rebroadcast_time, warning_time;
6692 linkwatch_forget_dev(dev);
6694 rebroadcast_time = warning_time = jiffies;
6695 refcnt = netdev_refcnt_read(dev);
6697 while (refcnt != 0) {
6698 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6701 /* Rebroadcast unregister notification */
6702 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6708 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6709 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6711 /* We must not have linkwatch events
6712 * pending on unregister. If this
6713 * happens, we simply run the queue
6714 * unscheduled, resulting in a noop for this device.
6717 linkwatch_run_queue();
6722 rebroadcast_time = jiffies;
6727 refcnt = netdev_refcnt_read(dev);
6729 if (time_after(jiffies, warning_time + 10 * HZ)) {
6730 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6732 warning_time = jiffies;
6741 * register_netdevice(x1);
6742 * register_netdevice(x2);
6744 * unregister_netdevice(y1);
6745 * unregister_netdevice(y2);
6751 * We are invoked by rtnl_unlock().
6752 * This allows us to deal with problems:
6753 * 1) We can delete sysfs objects which invoke hotplug
6754 * without deadlocking with linkwatch via keventd.
6755 * 2) Since we run with the RTNL semaphore not held, we can sleep
6756 * safely in order to wait for the netdev refcnt to drop to zero.
6758 * We must not return until all unregister events added during
6759 * the interval the lock was held have been completed.
6761 void netdev_run_todo(void)
6763 struct list_head list;
6765 /* Snapshot list, allow later requests */
6766 list_replace_init(&net_todo_list, &list);
6771 /* Wait for rcu callbacks to finish before next phase */
6772 if (!list_empty(&list))
6775 while (!list_empty(&list)) {
6776 struct net_device *dev
6777 = list_first_entry(&list, struct net_device, todo_list);
6778 list_del(&dev->todo_list);
6781 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6784 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
6785 pr_err("network todo '%s' but state %d\n",
6786 dev->name, dev->reg_state);
6791 dev->reg_state = NETREG_UNREGISTERED;
6793 on_each_cpu(flush_backlog, dev, 1);
6795 netdev_wait_allrefs(dev);
6798 BUG_ON(netdev_refcnt_read(dev));
6799 BUG_ON(!list_empty(&dev->ptype_all));
6800 BUG_ON(!list_empty(&dev->ptype_specific));
6801 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6802 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
6803 WARN_ON(dev->dn_ptr);
6805 if (dev->destructor)
6806 dev->destructor(dev);
6808 /* Report a network device has been unregistered */
6810 dev_net(dev)->dev_unreg_count--;
6812 wake_up(&netdev_unregistering_wq);
6814 /* Free network device */
6815 kobject_put(&dev->dev.kobj);
6819 /* Convert net_device_stats to rtnl_link_stats64. They have the same
6820 * fields in the same order, with only the type differing.
6822 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6823 const struct net_device_stats *netdev_stats)
6825 #if BITS_PER_LONG == 64
6826 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6827 memcpy(stats64, netdev_stats, sizeof(*stats64));
6829 size_t i, n = sizeof(*stats64) / sizeof(u64);
6830 const unsigned long *src = (const unsigned long *)netdev_stats;
6831 u64 *dst = (u64 *)stats64;
6833 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6834 sizeof(*stats64) / sizeof(u64));
6835 for (i = 0; i < n; i++)
6839 EXPORT_SYMBOL(netdev_stats_to_stats64);
6842 * dev_get_stats - get network device statistics
6843 * @dev: device to get statistics from
6844 * @storage: place to store stats
6846 * Get network statistics from device. Return @storage.
6847 * The device driver may provide its own method by setting
6848 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6849 * otherwise the internal statistics structure is used.
6851 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6852 struct rtnl_link_stats64 *storage)
6854 const struct net_device_ops *ops = dev->netdev_ops;
6856 if (ops->ndo_get_stats64) {
6857 memset(storage, 0, sizeof(*storage));
6858 ops->ndo_get_stats64(dev, storage);
6859 } else if (ops->ndo_get_stats) {
6860 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
6862 netdev_stats_to_stats64(storage, &dev->stats);
6864 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
6865 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
6868 EXPORT_SYMBOL(dev_get_stats);
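/* Illustrative sketch, not part of the original file: taking a 64-bit
 * snapshot of a device's counters. dev_get_stats() fills @storage whichever
 * driver method backs it; example_print_rx_packets is hypothetical.
 */
static void example_print_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	pr_info("%s: %llu packets received\n", dev->name,
		(unsigned long long)stats.rx_packets);
}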
6870 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6872 struct netdev_queue *queue = dev_ingress_queue(dev);
6874 #ifdef CONFIG_NET_CLS_ACT
6877 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6880 netdev_init_one_queue(dev, queue, NULL);
6881 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
6882 queue->qdisc_sleeping = &noop_qdisc;
6883 rcu_assign_pointer(dev->ingress_queue, queue);
6888 static const struct ethtool_ops default_ethtool_ops;
6890 void netdev_set_default_ethtool_ops(struct net_device *dev,
6891 const struct ethtool_ops *ops)
6893 if (dev->ethtool_ops == &default_ethtool_ops)
6894 dev->ethtool_ops = ops;
6896 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6898 void netdev_freemem(struct net_device *dev)
6900 char *addr = (char *)dev - dev->padded;
6906 * alloc_netdev_mqs - allocate network device
6907 * @sizeof_priv: size of private data to allocate space for
6908 * @name: device name format string
6909 * @name_assign_type: origin of device name
6910 * @setup: callback to initialize device
6911 * @txqs: the number of TX subqueues to allocate
6912 * @rxqs: the number of RX subqueues to allocate
6914 * Allocates a struct net_device with private data area for driver use
6915 * and performs basic initialization. Also allocates subqueue structs
6916 * for each queue on the device.
6918 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6919 unsigned char name_assign_type,
6920 void (*setup)(struct net_device *),
6921 unsigned int txqs, unsigned int rxqs)
6923 struct net_device *dev;
6925 struct net_device *p;
6927 BUG_ON(strlen(name) >= sizeof(dev->name));
6930 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6936 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6941 alloc_size = sizeof(struct net_device);
6943 /* ensure 32-byte alignment of private area */
6944 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6945 alloc_size += sizeof_priv;
6947 /* ensure 32-byte alignment of whole construct */
6948 alloc_size += NETDEV_ALIGN - 1;
6950 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6952 p = vzalloc(alloc_size);
6956 dev = PTR_ALIGN(p, NETDEV_ALIGN);
6957 dev->padded = (char *)dev - (char *)p;
6959 dev->pcpu_refcnt = alloc_percpu(int);
6960 if (!dev->pcpu_refcnt)
6963 if (dev_addr_init(dev))
6969 dev_net_set(dev, &init_net);
6971 dev->gso_max_size = GSO_MAX_SIZE;
6972 dev->gso_max_segs = GSO_MAX_SEGS;
6973 dev->gso_min_segs = 0;
6975 INIT_LIST_HEAD(&dev->napi_list);
6976 INIT_LIST_HEAD(&dev->unreg_list);
6977 INIT_LIST_HEAD(&dev->close_list);
6978 INIT_LIST_HEAD(&dev->link_watch_list);
6979 INIT_LIST_HEAD(&dev->adj_list.upper);
6980 INIT_LIST_HEAD(&dev->adj_list.lower);
6981 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6982 INIT_LIST_HEAD(&dev->all_adj_list.lower);
6983 INIT_LIST_HEAD(&dev->ptype_all);
6984 INIT_LIST_HEAD(&dev->ptype_specific);
6985 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
6988 dev->num_tx_queues = txqs;
6989 dev->real_num_tx_queues = txqs;
6990 if (netif_alloc_netdev_queues(dev))
6994 dev->num_rx_queues = rxqs;
6995 dev->real_num_rx_queues = rxqs;
6996 if (netif_alloc_rx_queues(dev))
7000 strcpy(dev->name, name);
7001 dev->name_assign_type = name_assign_type;
7002 dev->group = INIT_NETDEV_GROUP;
7003 if (!dev->ethtool_ops)
7004 dev->ethtool_ops = &default_ethtool_ops;
7006 nf_hook_ingress_init(dev);
7015 free_percpu(dev->pcpu_refcnt);
7017 netdev_freemem(dev);
7020 EXPORT_SYMBOL(alloc_netdev_mqs);
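/* Illustrative sketch, not part of the original file: allocating a
 * multi-queue device directly with alloc_netdev_mqs(); Ethernet drivers
 * normally go through the alloc_etherdev_mqs() wrapper instead.
 * example_setup and example_alloc are hypothetical.
 */
static void example_setup(struct net_device *dev)
{
	ether_setup(dev);	/* standard Ethernet defaults */
}

static struct net_device *example_alloc(unsigned int queues)
{
	return alloc_netdev_mqs(0, "example%d", NET_NAME_UNKNOWN,
				example_setup, queues, queues);
}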
7023 * free_netdev - free network device
7026 * This function does the last stage of destroying an allocated device
7027 * interface. The reference to the device object is released.
7028 * If this is the last reference then it will be freed.
7030 void free_netdev(struct net_device *dev)
7032 struct napi_struct *p, *n;
7034 netif_free_tx_queues(dev);
7039 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
7041 /* Flush device addresses */
7042 dev_addr_flush(dev);
7044 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
7047 free_percpu(dev->pcpu_refcnt);
7048 dev->pcpu_refcnt = NULL;
7050 /* Compatibility with error handling in drivers */
7051 if (dev->reg_state == NETREG_UNINITIALIZED) {
7052 netdev_freemem(dev);
7056 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
7057 dev->reg_state = NETREG_RELEASED;
7059 /* will free via device release */
7060 put_device(&dev->dev);
7062 EXPORT_SYMBOL(free_netdev);
7065 * synchronize_net - Synchronize with packet receive processing
7067 * Wait for packets currently being received to be done.
7068 * Does not block later packets from starting.
7070 void synchronize_net(void)
7073 if (rtnl_is_locked())
7074 synchronize_rcu_expedited();
7078 EXPORT_SYMBOL(synchronize_net);
7081 * unregister_netdevice_queue - remove device from the kernel
7085 * This function shuts down a device interface and removes it
7086 * from the kernel tables.
7087 * If head is not NULL, the device is queued to be unregistered later.
7089 * Callers must hold the rtnl semaphore. You may want
7090 * unregister_netdev() instead of this.
7093 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
7098 list_move_tail(&dev->unreg_list, head);
7100 rollback_registered(dev);
7101 /* Finish processing unregister after unlock */
7105 EXPORT_SYMBOL(unregister_netdevice_queue);
7108 * unregister_netdevice_many - unregister many devices
7109 * @head: list of devices
7111 * Note: As most callers use a stack-allocated list_head,
7112 * we force a list_del() to make sure the stack won't be corrupted later.
7114 void unregister_netdevice_many(struct list_head *head)
7116 struct net_device *dev;
7118 if (!list_empty(head)) {
7119 rollback_registered_many(head);
7120 list_for_each_entry(dev, head, unreg_list)
7125 EXPORT_SYMBOL(unregister_netdevice_many);
7128 * unregister_netdev - remove device from the kernel
7131 * This function shuts down a device interface and removes it
7132 * from the kernel tables.
7134 * This is just a wrapper for unregister_netdevice that takes
7135 * the rtnl semaphore. In general you want to use this and not
7136 * unregister_netdevice.
7138 void unregister_netdev(struct net_device *dev)
7141 unregister_netdevice(dev);
7144 EXPORT_SYMBOL(unregister_netdev);
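/* Illustrative sketch, not part of the original file: the matching remove
 * path for the probe sketch earlier; unregister through the RTNL-taking
 * wrapper, then drop the last reference. example_remove is hypothetical.
 */
static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}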
7147 * dev_change_net_namespace - move device to a different network namespace
7149 * @net: network namespace
7150 * @pat: If not NULL name pattern to try if the current device name
7151 * is already taken in the destination network namespace.
7153 * This function shuts down a device interface and moves it
7154 * to a new network namespace. On success 0 is returned, on
7155 * a failure a negative errno code is returned.
7157 * Callers must hold the rtnl semaphore.
7160 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7166 /* Don't allow namespace local devices to be moved. */
7168 if (dev->features & NETIF_F_NETNS_LOCAL)
7171 /* Ensure the device has been registered */
7172 if (dev->reg_state != NETREG_REGISTERED)
7175 /* Get out if there is nothing to do */
7177 if (net_eq(dev_net(dev), net))
7180 /* Pick the destination device name, and ensure
7181 * we can use it in the destination network namespace.
7184 if (__dev_get_by_name(net, dev->name)) {
7185 /* We get here if we can't use the current device name */
7188 if (dev_get_valid_name(net, dev, pat) < 0)
7193 * And now a mini version of register_netdevice and unregister_netdevice.
7196 /* If device is running close it first. */
7199 /* And unlink it from device chain */
7201 unlist_netdevice(dev);
7205 /* Shutdown queueing discipline. */
7208 /* Notify protocols that we are about to destroy
7209 this device. They should clean up all their state.
7211 Note that dev->reg_state stays at NETREG_REGISTERED.
7212 This is intentional, so that 8021q and macvlan know
7213 the device is just moving and can keep their slaves up.
7215 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7217 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7218 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
7221 * Flush the unicast and multicast chains
7226 /* Send a netdev-removed uevent to the old namespace */
7227 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
7228 netdev_adjacent_del_links(dev);
7230 /* Actually switch the network namespace */
7231 dev_net_set(dev, net);
7233 /* If there is an ifindex conflict assign a new one */
7234 if (__dev_get_by_index(net, dev->ifindex))
7235 dev->ifindex = dev_new_index(net);
7237 /* Send a netdev-add uevent to the new namespace */
7238 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
7239 netdev_adjacent_add_links(dev);
7241 /* Fixup kobjects */
7242 err = device_rename(&dev->dev, dev->name);
7245 /* Add the device back in the hashes */
7246 list_netdevice(dev);
7248 /* Notify protocols, that a new device appeared. */
7249 call_netdevice_notifiers(NETDEV_REGISTER, dev);
7252 * Prevent userspace races by waiting until the network
7253 * device is fully set up before sending notifications.
7255 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
7262 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
7264 static int dev_cpu_callback(struct notifier_block *nfb,
7265 unsigned long action,
7268 struct sk_buff **list_skb;
7269 struct sk_buff *skb;
7270 unsigned int cpu, oldcpu = (unsigned long)ocpu;
7271 struct softnet_data *sd, *oldsd;
7273 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
7276 local_irq_disable();
7277 cpu = smp_processor_id();
7278 sd = &per_cpu(softnet_data, cpu);
7279 oldsd = &per_cpu(softnet_data, oldcpu);
7281 /* Find end of our completion_queue. */
7282 list_skb = &sd->completion_queue;
7284 list_skb = &(*list_skb)->next;
7285 /* Append completion queue from offline CPU. */
7286 *list_skb = oldsd->completion_queue;
7287 oldsd->completion_queue = NULL;
7289 /* Append output queue from offline CPU. */
7290 if (oldsd->output_queue) {
7291 *sd->output_queue_tailp = oldsd->output_queue;
7292 sd->output_queue_tailp = oldsd->output_queue_tailp;
7293 oldsd->output_queue = NULL;
7294 oldsd->output_queue_tailp = &oldsd->output_queue;
7296 /* Append NAPI poll list from offline CPU, with one exception:
7297 * process_backlog() must be called by the CPU owning the per-cpu backlog.
7298 * We properly handle process_queue & input_pkt_queue later.
7300 while (!list_empty(&oldsd->poll_list)) {
7301 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7305 list_del_init(&napi->poll_list);
7306 if (napi->poll == process_backlog)
7309 ____napi_schedule(sd, napi);
7312 raise_softirq_irqoff(NET_TX_SOFTIRQ);
7315 /* Process offline CPU's input_pkt_queue */
7316 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
7318 input_queue_head_incr(oldsd);
7320 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
7322 input_queue_head_incr(oldsd);
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask.  Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
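
/* Illustrative sketch (not part of this file): an aggregating driver such
 * as bonding recomputes its feature set by starting from the widest
 * possible value and folding each lower device in turn; compare
 * bond_compute_features().  The two-slave helper below is hypothetical
 * and compiled out.
 */
#if 0
static netdev_features_t example_master_features(struct net_device *master,
						 struct net_device *slave_a,
						 struct net_device *slave_b)
{
	netdev_features_t features = NETIF_F_ALL_FOR_ALL;

	features = netdev_increment_features(features, slave_a->features,
					     master->features);
	features = netdev_increment_features(features, slave_b->features,
					     master->features);
	return features;
}
#endif
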
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);
	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);
	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;
	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;
	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
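
/* Illustrative sketch (not part of this file): one real consumer is the
 * netdev watchdog in net/sched/sch_generic.c, which names the driver when
 * a transmit queue stalls.  A minimal, hypothetical equivalent, compiled
 * out here:
 */
#if 0
static void example_report_tx_timeout(struct net_device *dev, unsigned int txq)
{
	WARN_ONCE(1, "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
		  dev->name, netdev_drivername(dev), txq);
}
#endif
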
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;
	__netdev_printk(level, dev, &vaf);
	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
	__netdev_printk(level, dev, &vaf);				\
	va_end(args);							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
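
/* Illustrative sketch (not part of this file): drivers call the generated
 * helpers instead of raw printk() so that every message is prefixed with
 * the driver, bus address and interface name.  The function below is
 * hypothetical and compiled out; it only demonstrates the calling
 * convention.
 */
#if 0
static void example_report_link(struct net_device *dev, bool up, int err)
{
	if (err)
		netdev_err(dev, "failed to read link state: %d\n", err);
	else
		netdev_info(dev, "link %s\n", up ? "up" : "down");
}
#endif
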
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/* Push all migratable network devices back to the
	 * initial network namespace.
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network device
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}
static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module.  At boot time this walks the device list
 *	and unhooks any devices that fail to initialise (normally hardware
 *	not present) and leaves us with a valid list of present and active
 *	devices.
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);
	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/* Initialise the packet receive queues. */
	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace the loopback device must
	 * be present too.  Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the list of
	 * network devices, so that it is the first device that appears
	 * and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);