]> asedeno.scripts.mit.edu Git - linux.git/blob - net/core/dev.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[linux.git] / net / core / dev.c
1 /*
2  *      NET3    Protocol independent device support routines.
3  *
4  *              This program is free software; you can redistribute it and/or
5  *              modify it under the terms of the GNU General Public License
6  *              as published by the Free Software Foundation; either version
7  *              2 of the License, or (at your option) any later version.
8  *
9  *      Derived from the non IP parts of dev.c 1.0.19
10  *              Authors:        Ross Biro
11  *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *      Additional Authors:
15  *              Florian la Roche <rzsfl@rz.uni-sb.de>
16  *              Alan Cox <gw4pts@gw4pts.ampr.org>
17  *              David Hinds <dahinds@users.sourceforge.net>
18  *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *              Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *      Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *                                      to 2 if register_netdev gets called
25  *                                      before net_dev_init & also removed a
26  *                                      few lines of code in the process.
27  *              Alan Cox        :       device private ioctl copies fields back.
28  *              Alan Cox        :       Transmit queue code does relevant
29  *                                      stunts to keep the queue safe.
30  *              Alan Cox        :       Fixed double lock.
31  *              Alan Cox        :       Fixed promisc NULL pointer trap
32  *              ????????        :       Support the full private ioctl range
33  *              Alan Cox        :       Moved ioctl permission check into
34  *                                      drivers
35  *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
36  *              Alan Cox        :       100 backlog just doesn't cut it when
37  *                                      you start doing multicast video 8)
38  *              Alan Cox        :       Rewrote net_bh and list manager.
39  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
40  *              Alan Cox        :       Took out transmit every packet pass
41  *                                      Saved a few bytes in the ioctl handler
42  *              Alan Cox        :       Network driver sets packet type before
43  *                                      calling netif_rx. Saves a function
44  *                                      call a packet.
45  *              Alan Cox        :       Hashed net_bh()
46  *              Richard Kooijman:       Timestamp fixes.
47  *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
48  *              Alan Cox        :       Device lock protection.
49  *              Alan Cox        :       Fixed nasty side effect of device close
50  *                                      changes.
51  *              Rudi Cilibrasi  :       Pass the right thing to
52  *                                      set_mac_address()
53  *              Dave Miller     :       32bit quantity for the device lock to
54  *                                      make it work out on a Sparc.
55  *              Bjorn Ekwall    :       Added KERNELD hack.
56  *              Alan Cox        :       Cleaned up the backlog initialise.
57  *              Craig Metz      :       SIOCGIFCONF fix if space for under
58  *                                      1 device.
59  *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
60  *                                      is no device open function.
61  *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
62  *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
63  *              Cyrus Durgin    :       Cleaned for KMOD
64  *              Adam Sulmicki   :       Bug Fix : Network Device Unload
65  *                                      A network device unload needs to purge
66  *                                      the backlog queue.
67  *      Paul Rusty Russell      :       SIOCSIFNAME
68  *              Pekka Riikonen  :       Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *                                      indefinitely on dev->refcnt
71  *              J Hadi Salim    :       - Backlog queue sampling
72  *                                      - netif_rx() feedback
73  */
74
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/stat.h>
101 #include <net/dst.h>
102 #include <net/pkt_sched.h>
103 #include <net/checksum.h>
104 #include <net/xfrm.h>
105 #include <linux/highmem.h>
106 #include <linux/init.h>
107 #include <linux/module.h>
108 #include <linux/netpoll.h>
109 #include <linux/rcupdate.h>
110 #include <linux/delay.h>
111 #include <net/iw_handler.h>
112 #include <asm/current.h>
113 #include <linux/audit.h>
114 #include <linux/dmaengine.h>
115 #include <linux/err.h>
116 #include <linux/ctype.h>
117 #include <linux/if_arp.h>
118 #include <linux/if_vlan.h>
119 #include <linux/ip.h>
120 #include <net/ip.h>
121 #include <net/mpls.h>
122 #include <linux/ipv6.h>
123 #include <linux/in.h>
124 #include <linux/jhash.h>
125 #include <linux/random.h>
126 #include <trace/events/napi.h>
127 #include <trace/events/net.h>
128 #include <trace/events/skb.h>
129 #include <linux/pci.h>
130 #include <linux/inetdevice.h>
131 #include <linux/cpu_rmap.h>
132 #include <linux/static_key.h>
133 #include <linux/hashtable.h>
134 #include <linux/vmalloc.h>
135 #include <linux/if_macvlan.h>
136 #include <linux/errqueue.h>
137 #include <linux/hrtimer.h>
138
139 #include "net-sysfs.h"
140
141 /* Instead of increasing this, you should create a hash table. */
142 #define MAX_GRO_SKBS 8
143
144 /* This should be increased if a protocol with a bigger head is added. */
145 #define GRO_MAX_HEAD (MAX_HEADER + 128)
146
147 static DEFINE_SPINLOCK(ptype_lock);
148 static DEFINE_SPINLOCK(offload_lock);
149 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
150 struct list_head ptype_all __read_mostly;       /* Taps */
151 static struct list_head offload_base __read_mostly;
152
153 static int netif_rx_internal(struct sk_buff *skb);
154 static int call_netdevice_notifiers_info(unsigned long val,
155                                          struct net_device *dev,
156                                          struct netdev_notifier_info *info);
157
158 /*
159  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
160  * semaphore.
161  *
162  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
163  *
164  * Writers must hold the rtnl semaphore while they loop through the
165  * dev_base_head list, and hold dev_base_lock for writing when they do the
166  * actual updates.  This allows pure readers to access the list even
167  * while a writer is preparing to update it.
168  *
169  * To put it another way, dev_base_lock is held for writing only to
170  * protect against pure readers; the rtnl semaphore provides the
171  * protection against other writers.
172  *
173  * See, for example usages, register_netdevice() and
174  * unregister_netdevice(), which must be called with the rtnl
175  * semaphore held.
176  */
177 DEFINE_RWLOCK(dev_base_lock);
178 EXPORT_SYMBOL(dev_base_lock);
179
180 /* protects napi_hash addition/deletion and napi_gen_id */
181 static DEFINE_SPINLOCK(napi_hash_lock);
182
183 static unsigned int napi_gen_id;
184 static DEFINE_HASHTABLE(napi_hash, 8);
185
186 static seqcount_t devnet_rename_seq;
187
188 static inline void dev_base_seq_inc(struct net *net)
189 {
190         while (++net->dev_base_seq == 0);
191 }
192
193 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
194 {
195         unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
196
197         return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
198 }
199
200 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
201 {
202         return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
203 }
204
205 static inline void rps_lock(struct softnet_data *sd)
206 {
207 #ifdef CONFIG_RPS
208         spin_lock(&sd->input_pkt_queue.lock);
209 #endif
210 }
211
212 static inline void rps_unlock(struct softnet_data *sd)
213 {
214 #ifdef CONFIG_RPS
215         spin_unlock(&sd->input_pkt_queue.lock);
216 #endif
217 }
218
219 /* Device list insertion */
220 static void list_netdevice(struct net_device *dev)
221 {
222         struct net *net = dev_net(dev);
223
224         ASSERT_RTNL();
225
226         write_lock_bh(&dev_base_lock);
227         list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
228         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
229         hlist_add_head_rcu(&dev->index_hlist,
230                            dev_index_hash(net, dev->ifindex));
231         write_unlock_bh(&dev_base_lock);
232
233         dev_base_seq_inc(net);
234 }
235
236 /* Device list removal
237  * caller must respect a RCU grace period before freeing/reusing dev
238  */
239 static void unlist_netdevice(struct net_device *dev)
240 {
241         ASSERT_RTNL();
242
243         /* Unlink dev from the device chain */
244         write_lock_bh(&dev_base_lock);
245         list_del_rcu(&dev->dev_list);
246         hlist_del_rcu(&dev->name_hlist);
247         hlist_del_rcu(&dev->index_hlist);
248         write_unlock_bh(&dev_base_lock);
249
250         dev_base_seq_inc(dev_net(dev));
251 }
252
253 /*
254  *      Our notifier list
255  */
256
257 static RAW_NOTIFIER_HEAD(netdev_chain);
258
259 /*
260  *      Device drivers call our routines to queue packets here. We empty the
261  *      queue in the local softnet handler.
262  */
263
264 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
265 EXPORT_PER_CPU_SYMBOL(softnet_data);
266
267 #ifdef CONFIG_LOCKDEP
268 /*
269  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
270  * according to dev->type
271  */
272 static const unsigned short netdev_lock_type[] =
273         {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
274          ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
275          ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
276          ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
277          ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
278          ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
279          ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
280          ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
281          ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
282          ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
283          ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
284          ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
285          ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
286          ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
287          ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
288
289 static const char *const netdev_lock_name[] =
290         {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
291          "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
292          "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
293          "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
294          "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
295          "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
296          "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
297          "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
298          "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
299          "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
300          "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
301          "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
302          "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
303          "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
304          "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
305
306 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
307 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
308
309 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
310 {
311         int i;
312
313         for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
314                 if (netdev_lock_type[i] == dev_type)
315                         return i;
316         /* the last key is used by default */
317         return ARRAY_SIZE(netdev_lock_type) - 1;
318 }
319
320 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
321                                                  unsigned short dev_type)
322 {
323         int i;
324
325         i = netdev_lock_pos(dev_type);
326         lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
327                                    netdev_lock_name[i]);
328 }
329
330 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
331 {
332         int i;
333
334         i = netdev_lock_pos(dev->type);
335         lockdep_set_class_and_name(&dev->addr_list_lock,
336                                    &netdev_addr_lock_key[i],
337                                    netdev_lock_name[i]);
338 }
339 #else
340 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
341                                                  unsigned short dev_type)
342 {
343 }
344 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
345 {
346 }
347 #endif
348
349 /*******************************************************************************
350
351                 Protocol management and registration routines
352
353 *******************************************************************************/
354
355 /*
356  *      Add a protocol ID to the list. Now that the input handler is
357  *      smarter we can dispense with all the messy stuff that used to be
358  *      here.
359  *
360  *      BEWARE!!! Protocol handlers, mangling input packets,
361  *      MUST BE last in hash buckets and checking protocol handlers
362  *      MUST start from promiscuous ptype_all chain in net_bh.
363  *      It is true now, do not change it.
364  *      Explanation follows: if protocol handler, mangling packet, will
365  *      be the first on list, it is not able to sense, that packet
366  *      is cloned and should be copied-on-write, so that it will
367  *      change it and subsequent readers will get broken packet.
368  *                                                      --ANK (980803)
369  */
370
371 static inline struct list_head *ptype_head(const struct packet_type *pt)
372 {
373         if (pt->type == htons(ETH_P_ALL))
374                 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
375         else
376                 return pt->dev ? &pt->dev->ptype_specific :
377                                  &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
378 }
379
380 /**
381  *      dev_add_pack - add packet handler
382  *      @pt: packet type declaration
383  *
384  *      Add a protocol handler to the networking stack. The passed &packet_type
385  *      is linked into kernel lists and may not be freed until it has been
386  *      removed from the kernel lists.
387  *
388  *      This call does not sleep therefore it can not
389  *      guarantee all CPU's that are in middle of receiving packets
390  *      will see the new packet type (until the next received packet).
391  */
392
393 void dev_add_pack(struct packet_type *pt)
394 {
395         struct list_head *head = ptype_head(pt);
396
397         spin_lock(&ptype_lock);
398         list_add_rcu(&pt->list, head);
399         spin_unlock(&ptype_lock);
400 }
401 EXPORT_SYMBOL(dev_add_pack);
402
403 /**
404  *      __dev_remove_pack        - remove packet handler
405  *      @pt: packet type declaration
406  *
407  *      Remove a protocol handler that was previously added to the kernel
408  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
409  *      from the kernel lists and can be freed or reused once this function
410  *      returns.
411  *
412  *      The packet type might still be in use by receivers
413  *      and must not be freed until after all the CPU's have gone
414  *      through a quiescent state.
415  */
416 void __dev_remove_pack(struct packet_type *pt)
417 {
418         struct list_head *head = ptype_head(pt);
419         struct packet_type *pt1;
420
421         spin_lock(&ptype_lock);
422
423         list_for_each_entry(pt1, head, list) {
424                 if (pt == pt1) {
425                         list_del_rcu(&pt->list);
426                         goto out;
427                 }
428         }
429
430         pr_warn("dev_remove_pack: %p not found\n", pt);
431 out:
432         spin_unlock(&ptype_lock);
433 }
434 EXPORT_SYMBOL(__dev_remove_pack);
435
436 /**
437  *      dev_remove_pack  - remove packet handler
438  *      @pt: packet type declaration
439  *
440  *      Remove a protocol handler that was previously added to the kernel
441  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
442  *      from the kernel lists and can be freed or reused once this function
443  *      returns.
444  *
445  *      This call sleeps to guarantee that no CPU is looking at the packet
446  *      type after return.
447  */
448 void dev_remove_pack(struct packet_type *pt)
449 {
450         __dev_remove_pack(pt);
451
452         synchronize_net();
453 }
454 EXPORT_SYMBOL(dev_remove_pack);
455
456
457 /**
458  *      dev_add_offload - register offload handlers
459  *      @po: protocol offload declaration
460  *
461  *      Add protocol offload handlers to the networking stack. The passed
462  *      &proto_offload is linked into kernel lists and may not be freed until
463  *      it has been removed from the kernel lists.
464  *
465  *      This call does not sleep therefore it can not
466  *      guarantee all CPU's that are in middle of receiving packets
467  *      will see the new offload handlers (until the next received packet).
468  */
469 void dev_add_offload(struct packet_offload *po)
470 {
471         struct list_head *head = &offload_base;
472
473         spin_lock(&offload_lock);
474         list_add_rcu(&po->list, head);
475         spin_unlock(&offload_lock);
476 }
477 EXPORT_SYMBOL(dev_add_offload);
478
479 /**
480  *      __dev_remove_offload     - remove offload handler
481  *      @po: packet offload declaration
482  *
483  *      Remove a protocol offload handler that was previously added to the
484  *      kernel offload handlers by dev_add_offload(). The passed &offload_type
485  *      is removed from the kernel lists and can be freed or reused once this
486  *      function returns.
487  *
488  *      The packet type might still be in use by receivers
489  *      and must not be freed until after all the CPU's have gone
490  *      through a quiescent state.
491  */
492 static void __dev_remove_offload(struct packet_offload *po)
493 {
494         struct list_head *head = &offload_base;
495         struct packet_offload *po1;
496
497         spin_lock(&offload_lock);
498
499         list_for_each_entry(po1, head, list) {
500                 if (po == po1) {
501                         list_del_rcu(&po->list);
502                         goto out;
503                 }
504         }
505
506         pr_warn("dev_remove_offload: %p not found\n", po);
507 out:
508         spin_unlock(&offload_lock);
509 }
510
511 /**
512  *      dev_remove_offload       - remove packet offload handler
513  *      @po: packet offload declaration
514  *
515  *      Remove a packet offload handler that was previously added to the kernel
516  *      offload handlers by dev_add_offload(). The passed &offload_type is
517  *      removed from the kernel lists and can be freed or reused once this
518  *      function returns.
519  *
520  *      This call sleeps to guarantee that no CPU is looking at the packet
521  *      type after return.
522  */
523 void dev_remove_offload(struct packet_offload *po)
524 {
525         __dev_remove_offload(po);
526
527         synchronize_net();
528 }
529 EXPORT_SYMBOL(dev_remove_offload);
530
531 /******************************************************************************
532
533                       Device Boot-time Settings Routines
534
535 *******************************************************************************/
536
537 /* Boot time configuration table */
538 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
539
540 /**
541  *      netdev_boot_setup_add   - add new setup entry
542  *      @name: name of the device
543  *      @map: configured settings for the device
544  *
545  *      Adds new setup entry to the dev_boot_setup list.  The function
546  *      returns 0 on error and 1 on success.  This is a generic routine to
547  *      all netdevices.
548  */
549 static int netdev_boot_setup_add(char *name, struct ifmap *map)
550 {
551         struct netdev_boot_setup *s;
552         int i;
553
554         s = dev_boot_setup;
555         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
556                 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
557                         memset(s[i].name, 0, sizeof(s[i].name));
558                         strlcpy(s[i].name, name, IFNAMSIZ);
559                         memcpy(&s[i].map, map, sizeof(s[i].map));
560                         break;
561                 }
562         }
563
564         return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
565 }
566
567 /**
568  *      netdev_boot_setup_check - check boot time settings
569  *      @dev: the netdevice
570  *
571  *      Check boot time settings for the device.
572  *      The found settings are set for the device to be used
573  *      later in the device probing.
574  *      Returns 0 if no settings found, 1 if they are.
575  */
576 int netdev_boot_setup_check(struct net_device *dev)
577 {
578         struct netdev_boot_setup *s = dev_boot_setup;
579         int i;
580
581         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
582                 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
583                     !strcmp(dev->name, s[i].name)) {
584                         dev->irq        = s[i].map.irq;
585                         dev->base_addr  = s[i].map.base_addr;
586                         dev->mem_start  = s[i].map.mem_start;
587                         dev->mem_end    = s[i].map.mem_end;
588                         return 1;
589                 }
590         }
591         return 0;
592 }
593 EXPORT_SYMBOL(netdev_boot_setup_check);
594
595
596 /**
597  *      netdev_boot_base        - get address from boot time settings
598  *      @prefix: prefix for network device
599  *      @unit: id for network device
600  *
601  *      Check boot time settings for the base address of device.
602  *      The found settings are set for the device to be used
603  *      later in the device probing.
604  *      Returns 0 if no settings found.
605  */
606 unsigned long netdev_boot_base(const char *prefix, int unit)
607 {
608         const struct netdev_boot_setup *s = dev_boot_setup;
609         char name[IFNAMSIZ];
610         int i;
611
612         sprintf(name, "%s%d", prefix, unit);
613
614         /*
615          * If device already registered then return base of 1
616          * to indicate not to probe for this interface
617          */
618         if (__dev_get_by_name(&init_net, name))
619                 return 1;
620
621         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
622                 if (!strcmp(name, s[i].name))
623                         return s[i].map.base_addr;
624         return 0;
625 }
626
627 /*
628  * Saves at boot time configured settings for any netdevice.
629  */
630 int __init netdev_boot_setup(char *str)
631 {
632         int ints[5];
633         struct ifmap map;
634
635         str = get_options(str, ARRAY_SIZE(ints), ints);
636         if (!str || !*str)
637                 return 0;
638
639         /* Save settings */
640         memset(&map, 0, sizeof(map));
641         if (ints[0] > 0)
642                 map.irq = ints[1];
643         if (ints[0] > 1)
644                 map.base_addr = ints[2];
645         if (ints[0] > 2)
646                 map.mem_start = ints[3];
647         if (ints[0] > 3)
648                 map.mem_end = ints[4];
649
650         /* Add new entry to the list */
651         return netdev_boot_setup_add(str, &map);
652 }
653
654 __setup("netdev=", netdev_boot_setup);
655
656 /*******************************************************************************
657
658                             Device Interface Subroutines
659
660 *******************************************************************************/
661
662 /**
663  *      dev_get_iflink  - get 'iflink' value of a interface
664  *      @dev: targeted interface
665  *
666  *      Indicates the ifindex the interface is linked to.
667  *      Physical interfaces have the same 'ifindex' and 'iflink' values.
668  */
669
670 int dev_get_iflink(const struct net_device *dev)
671 {
672         if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
673                 return dev->netdev_ops->ndo_get_iflink(dev);
674
675         /* If dev->rtnl_link_ops is set, it's a virtual interface. */
676         if (dev->rtnl_link_ops)
677                 return 0;
678
679         return dev->ifindex;
680 }
681 EXPORT_SYMBOL(dev_get_iflink);
682
683 /**
684  *      __dev_get_by_name       - find a device by its name
685  *      @net: the applicable net namespace
686  *      @name: name to find
687  *
688  *      Find an interface by name. Must be called under RTNL semaphore
689  *      or @dev_base_lock. If the name is found a pointer to the device
690  *      is returned. If the name is not found then %NULL is returned. The
691  *      reference counters are not incremented so the caller must be
692  *      careful with locks.
693  */
694
695 struct net_device *__dev_get_by_name(struct net *net, const char *name)
696 {
697         struct net_device *dev;
698         struct hlist_head *head = dev_name_hash(net, name);
699
700         hlist_for_each_entry(dev, head, name_hlist)
701                 if (!strncmp(dev->name, name, IFNAMSIZ))
702                         return dev;
703
704         return NULL;
705 }
706 EXPORT_SYMBOL(__dev_get_by_name);
707
708 /**
709  *      dev_get_by_name_rcu     - find a device by its name
710  *      @net: the applicable net namespace
711  *      @name: name to find
712  *
713  *      Find an interface by name.
714  *      If the name is found a pointer to the device is returned.
715  *      If the name is not found then %NULL is returned.
716  *      The reference counters are not incremented so the caller must be
717  *      careful with locks. The caller must hold RCU lock.
718  */
719
720 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
721 {
722         struct net_device *dev;
723         struct hlist_head *head = dev_name_hash(net, name);
724
725         hlist_for_each_entry_rcu(dev, head, name_hlist)
726                 if (!strncmp(dev->name, name, IFNAMSIZ))
727                         return dev;
728
729         return NULL;
730 }
731 EXPORT_SYMBOL(dev_get_by_name_rcu);
732
733 /**
734  *      dev_get_by_name         - find a device by its name
735  *      @net: the applicable net namespace
736  *      @name: name to find
737  *
738  *      Find an interface by name. This can be called from any
739  *      context and does its own locking. The returned handle has
740  *      the usage count incremented and the caller must use dev_put() to
741  *      release it when it is no longer needed. %NULL is returned if no
742  *      matching device is found.
743  */
744
745 struct net_device *dev_get_by_name(struct net *net, const char *name)
746 {
747         struct net_device *dev;
748
749         rcu_read_lock();
750         dev = dev_get_by_name_rcu(net, name);
751         if (dev)
752                 dev_hold(dev);
753         rcu_read_unlock();
754         return dev;
755 }
756 EXPORT_SYMBOL(dev_get_by_name);
757
758 /**
759  *      __dev_get_by_index - find a device by its ifindex
760  *      @net: the applicable net namespace
761  *      @ifindex: index of device
762  *
763  *      Search for an interface by index. Returns %NULL if the device
764  *      is not found or a pointer to the device. The device has not
765  *      had its reference counter increased so the caller must be careful
766  *      about locking. The caller must hold either the RTNL semaphore
767  *      or @dev_base_lock.
768  */
769
770 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
771 {
772         struct net_device *dev;
773         struct hlist_head *head = dev_index_hash(net, ifindex);
774
775         hlist_for_each_entry(dev, head, index_hlist)
776                 if (dev->ifindex == ifindex)
777                         return dev;
778
779         return NULL;
780 }
781 EXPORT_SYMBOL(__dev_get_by_index);
782
783 /**
784  *      dev_get_by_index_rcu - find a device by its ifindex
785  *      @net: the applicable net namespace
786  *      @ifindex: index of device
787  *
788  *      Search for an interface by index. Returns %NULL if the device
789  *      is not found or a pointer to the device. The device has not
790  *      had its reference counter increased so the caller must be careful
791  *      about locking. The caller must hold RCU lock.
792  */
793
794 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
795 {
796         struct net_device *dev;
797         struct hlist_head *head = dev_index_hash(net, ifindex);
798
799         hlist_for_each_entry_rcu(dev, head, index_hlist)
800                 if (dev->ifindex == ifindex)
801                         return dev;
802
803         return NULL;
804 }
805 EXPORT_SYMBOL(dev_get_by_index_rcu);
806
807
808 /**
809  *      dev_get_by_index - find a device by its ifindex
810  *      @net: the applicable net namespace
811  *      @ifindex: index of device
812  *
813  *      Search for an interface by index. Returns NULL if the device
814  *      is not found or a pointer to the device. The device returned has
815  *      had a reference added and the pointer is safe until the user calls
816  *      dev_put to indicate they have finished with it.
817  */
818
819 struct net_device *dev_get_by_index(struct net *net, int ifindex)
820 {
821         struct net_device *dev;
822
823         rcu_read_lock();
824         dev = dev_get_by_index_rcu(net, ifindex);
825         if (dev)
826                 dev_hold(dev);
827         rcu_read_unlock();
828         return dev;
829 }
830 EXPORT_SYMBOL(dev_get_by_index);
831
832 /**
833  *      netdev_get_name - get a netdevice name, knowing its ifindex.
834  *      @net: network namespace
835  *      @name: a pointer to the buffer where the name will be stored.
836  *      @ifindex: the ifindex of the interface to get the name from.
837  *
838  *      The use of raw_seqcount_begin() and cond_resched() before
839  *      retrying is required as we want to give the writers a chance
840  *      to complete when CONFIG_PREEMPT is not set.
841  */
842 int netdev_get_name(struct net *net, char *name, int ifindex)
843 {
844         struct net_device *dev;
845         unsigned int seq;
846
847 retry:
848         seq = raw_seqcount_begin(&devnet_rename_seq);
849         rcu_read_lock();
850         dev = dev_get_by_index_rcu(net, ifindex);
851         if (!dev) {
852                 rcu_read_unlock();
853                 return -ENODEV;
854         }
855
856         strcpy(name, dev->name);
857         rcu_read_unlock();
858         if (read_seqcount_retry(&devnet_rename_seq, seq)) {
859                 cond_resched();
860                 goto retry;
861         }
862
863         return 0;
864 }
865
866 /**
867  *      dev_getbyhwaddr_rcu - find a device by its hardware address
868  *      @net: the applicable net namespace
869  *      @type: media type of device
870  *      @ha: hardware address
871  *
872  *      Search for an interface by MAC address. Returns NULL if the device
873  *      is not found or a pointer to the device.
874  *      The caller must hold RCU or RTNL.
875  *      The returned device has not had its ref count increased
876  *      and the caller must therefore be careful about locking
877  *
878  */
879
880 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
881                                        const char *ha)
882 {
883         struct net_device *dev;
884
885         for_each_netdev_rcu(net, dev)
886                 if (dev->type == type &&
887                     !memcmp(dev->dev_addr, ha, dev->addr_len))
888                         return dev;
889
890         return NULL;
891 }
892 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
893
894 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
895 {
896         struct net_device *dev;
897
898         ASSERT_RTNL();
899         for_each_netdev(net, dev)
900                 if (dev->type == type)
901                         return dev;
902
903         return NULL;
904 }
905 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
906
907 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
908 {
909         struct net_device *dev, *ret = NULL;
910
911         rcu_read_lock();
912         for_each_netdev_rcu(net, dev)
913                 if (dev->type == type) {
914                         dev_hold(dev);
915                         ret = dev;
916                         break;
917                 }
918         rcu_read_unlock();
919         return ret;
920 }
921 EXPORT_SYMBOL(dev_getfirstbyhwtype);
922
923 /**
924  *      __dev_get_by_flags - find any device with given flags
925  *      @net: the applicable net namespace
926  *      @if_flags: IFF_* values
927  *      @mask: bitmask of bits in if_flags to check
928  *
929  *      Search for any interface with the given flags. Returns NULL if a device
930  *      is not found or a pointer to the device. Must be called inside
931  *      rtnl_lock(), and result refcount is unchanged.
932  */
933
934 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
935                                       unsigned short mask)
936 {
937         struct net_device *dev, *ret;
938
939         ASSERT_RTNL();
940
941         ret = NULL;
942         for_each_netdev(net, dev) {
943                 if (((dev->flags ^ if_flags) & mask) == 0) {
944                         ret = dev;
945                         break;
946                 }
947         }
948         return ret;
949 }
950 EXPORT_SYMBOL(__dev_get_by_flags);
951
952 /**
953  *      dev_valid_name - check if name is okay for network device
954  *      @name: name string
955  *
956  *      Network device names need to be valid file names to
957  *      to allow sysfs to work.  We also disallow any kind of
958  *      whitespace.
959  */
960 bool dev_valid_name(const char *name)
961 {
962         if (*name == '\0')
963                 return false;
964         if (strlen(name) >= IFNAMSIZ)
965                 return false;
966         if (!strcmp(name, ".") || !strcmp(name, ".."))
967                 return false;
968
969         while (*name) {
970                 if (*name == '/' || *name == ':' || isspace(*name))
971                         return false;
972                 name++;
973         }
974         return true;
975 }
976 EXPORT_SYMBOL(dev_valid_name);
977
978 /**
979  *      __dev_alloc_name - allocate a name for a device
980  *      @net: network namespace to allocate the device name in
981  *      @name: name format string
982  *      @buf:  scratch buffer and result name string
983  *
984  *      Passed a format string - eg "lt%d" it will try and find a suitable
985  *      id. It scans list of devices to build up a free map, then chooses
986  *      the first empty slot. The caller must hold the dev_base or rtnl lock
987  *      while allocating the name and adding the device in order to avoid
988  *      duplicates.
989  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
990  *      Returns the number of the unit assigned or a negative errno code.
991  */
992
993 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
994 {
995         int i = 0;
996         const char *p;
997         const int max_netdevices = 8*PAGE_SIZE;
998         unsigned long *inuse;
999         struct net_device *d;
1000
1001         p = strnchr(name, IFNAMSIZ-1, '%');
1002         if (p) {
1003                 /*
1004                  * Verify the string as this thing may have come from
1005                  * the user.  There must be either one "%d" and no other "%"
1006                  * characters.
1007                  */
1008                 if (p[1] != 'd' || strchr(p + 2, '%'))
1009                         return -EINVAL;
1010
1011                 /* Use one page as a bit array of possible slots */
1012                 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1013                 if (!inuse)
1014                         return -ENOMEM;
1015
1016                 for_each_netdev(net, d) {
1017                         if (!sscanf(d->name, name, &i))
1018                                 continue;
1019                         if (i < 0 || i >= max_netdevices)
1020                                 continue;
1021
1022                         /*  avoid cases where sscanf is not exact inverse of printf */
1023                         snprintf(buf, IFNAMSIZ, name, i);
1024                         if (!strncmp(buf, d->name, IFNAMSIZ))
1025                                 set_bit(i, inuse);
1026                 }
1027
1028                 i = find_first_zero_bit(inuse, max_netdevices);
1029                 free_page((unsigned long) inuse);
1030         }
1031
1032         if (buf != name)
1033                 snprintf(buf, IFNAMSIZ, name, i);
1034         if (!__dev_get_by_name(net, buf))
1035                 return i;
1036
1037         /* It is possible to run out of possible slots
1038          * when the name is long and there isn't enough space left
1039          * for the digits, or if all bits are used.
1040          */
1041         return -ENFILE;
1042 }
1043
1044 /**
1045  *      dev_alloc_name - allocate a name for a device
1046  *      @dev: device
1047  *      @name: name format string
1048  *
1049  *      Passed a format string - eg "lt%d" it will try and find a suitable
1050  *      id. It scans list of devices to build up a free map, then chooses
1051  *      the first empty slot. The caller must hold the dev_base or rtnl lock
1052  *      while allocating the name and adding the device in order to avoid
1053  *      duplicates.
1054  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1055  *      Returns the number of the unit assigned or a negative errno code.
1056  */
1057
1058 int dev_alloc_name(struct net_device *dev, const char *name)
1059 {
1060         char buf[IFNAMSIZ];
1061         struct net *net;
1062         int ret;
1063
1064         BUG_ON(!dev_net(dev));
1065         net = dev_net(dev);
1066         ret = __dev_alloc_name(net, name, buf);
1067         if (ret >= 0)
1068                 strlcpy(dev->name, buf, IFNAMSIZ);
1069         return ret;
1070 }
1071 EXPORT_SYMBOL(dev_alloc_name);
1072
1073 static int dev_alloc_name_ns(struct net *net,
1074                              struct net_device *dev,
1075                              const char *name)
1076 {
1077         char buf[IFNAMSIZ];
1078         int ret;
1079
1080         ret = __dev_alloc_name(net, name, buf);
1081         if (ret >= 0)
1082                 strlcpy(dev->name, buf, IFNAMSIZ);
1083         return ret;
1084 }
1085
1086 static int dev_get_valid_name(struct net *net,
1087                               struct net_device *dev,
1088                               const char *name)
1089 {
1090         BUG_ON(!net);
1091
1092         if (!dev_valid_name(name))
1093                 return -EINVAL;
1094
1095         if (strchr(name, '%'))
1096                 return dev_alloc_name_ns(net, dev, name);
1097         else if (__dev_get_by_name(net, name))
1098                 return -EEXIST;
1099         else if (dev->name != name)
1100                 strlcpy(dev->name, name, IFNAMSIZ);
1101
1102         return 0;
1103 }
1104
1105 /**
1106  *      dev_change_name - change name of a device
1107  *      @dev: device
1108  *      @newname: name (or format string) must be at least IFNAMSIZ
1109  *
1110  *      Change name of a device, can pass format strings "eth%d".
1111  *      for wildcarding.
1112  */
1113 int dev_change_name(struct net_device *dev, const char *newname)
1114 {
1115         unsigned char old_assign_type;
1116         char oldname[IFNAMSIZ];
1117         int err = 0;
1118         int ret;
1119         struct net *net;
1120
1121         ASSERT_RTNL();
1122         BUG_ON(!dev_net(dev));
1123
1124         net = dev_net(dev);
1125         if (dev->flags & IFF_UP)
1126                 return -EBUSY;
1127
1128         write_seqcount_begin(&devnet_rename_seq);
1129
1130         if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1131                 write_seqcount_end(&devnet_rename_seq);
1132                 return 0;
1133         }
1134
1135         memcpy(oldname, dev->name, IFNAMSIZ);
1136
1137         err = dev_get_valid_name(net, dev, newname);
1138         if (err < 0) {
1139                 write_seqcount_end(&devnet_rename_seq);
1140                 return err;
1141         }
1142
1143         if (oldname[0] && !strchr(oldname, '%'))
1144                 netdev_info(dev, "renamed from %s\n", oldname);
1145
1146         old_assign_type = dev->name_assign_type;
1147         dev->name_assign_type = NET_NAME_RENAMED;
1148
1149 rollback:
1150         ret = device_rename(&dev->dev, dev->name);
1151         if (ret) {
1152                 memcpy(dev->name, oldname, IFNAMSIZ);
1153                 dev->name_assign_type = old_assign_type;
1154                 write_seqcount_end(&devnet_rename_seq);
1155                 return ret;
1156         }
1157
1158         write_seqcount_end(&devnet_rename_seq);
1159
1160         netdev_adjacent_rename_links(dev, oldname);
1161
1162         write_lock_bh(&dev_base_lock);
1163         hlist_del_rcu(&dev->name_hlist);
1164         write_unlock_bh(&dev_base_lock);
1165
1166         synchronize_rcu();
1167
1168         write_lock_bh(&dev_base_lock);
1169         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1170         write_unlock_bh(&dev_base_lock);
1171
1172         ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1173         ret = notifier_to_errno(ret);
1174
1175         if (ret) {
1176                 /* err >= 0 after dev_alloc_name() or stores the first errno */
1177                 if (err >= 0) {
1178                         err = ret;
1179                         write_seqcount_begin(&devnet_rename_seq);
1180                         memcpy(dev->name, oldname, IFNAMSIZ);
1181                         memcpy(oldname, newname, IFNAMSIZ);
1182                         dev->name_assign_type = old_assign_type;
1183                         old_assign_type = NET_NAME_RENAMED;
1184                         goto rollback;
1185                 } else {
1186                         pr_err("%s: name change rollback failed: %d\n",
1187                                dev->name, ret);
1188                 }
1189         }
1190
1191         return err;
1192 }
1193
1194 /**
1195  *      dev_set_alias - change ifalias of a device
1196  *      @dev: device
1197  *      @alias: name up to IFALIASZ
1198  *      @len: limit of bytes to copy from info
1199  *
1200  *      Set ifalias for a device,
1201  */
1202 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1203 {
1204         char *new_ifalias;
1205
1206         ASSERT_RTNL();
1207
1208         if (len >= IFALIASZ)
1209                 return -EINVAL;
1210
1211         if (!len) {
1212                 kfree(dev->ifalias);
1213                 dev->ifalias = NULL;
1214                 return 0;
1215         }
1216
1217         new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1218         if (!new_ifalias)
1219                 return -ENOMEM;
1220         dev->ifalias = new_ifalias;
1221
1222         strlcpy(dev->ifalias, alias, len+1);
1223         return len;
1224 }
1225
1226
1227 /**
1228  *      netdev_features_change - device changes features
1229  *      @dev: device to cause notification
1230  *
1231  *      Called to indicate a device has changed features.
1232  */
1233 void netdev_features_change(struct net_device *dev)
1234 {
1235         call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1236 }
1237 EXPORT_SYMBOL(netdev_features_change);
1238
1239 /**
1240  *      netdev_state_change - device changes state
1241  *      @dev: device to cause notification
1242  *
1243  *      Called to indicate a device has changed state. This function calls
1244  *      the notifier chains for netdev_chain and sends a NEWLINK message
1245  *      to the routing socket.
1246  */
1247 void netdev_state_change(struct net_device *dev)
1248 {
1249         if (dev->flags & IFF_UP) {
1250                 struct netdev_notifier_change_info change_info;
1251
1252                 change_info.flags_changed = 0;
1253                 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1254                                               &change_info.info);
1255                 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1256         }
1257 }
1258 EXPORT_SYMBOL(netdev_state_change);
1259
1260 /**
1261  *      netdev_notify_peers - notify network peers about existence of @dev
1262  *      @dev: network device
1263  *
1264  * Generate traffic such that interested network peers are aware of
1265  * @dev, such as by generating a gratuitous ARP. This may be used when
1266  * a device wants to inform the rest of the network about some sort of
1267  * reconfiguration such as a failover event or virtual machine
1268  * migration.
1269  */
1270 void netdev_notify_peers(struct net_device *dev)
1271 {
1272         rtnl_lock();
1273         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1274         rtnl_unlock();
1275 }
1276 EXPORT_SYMBOL(netdev_notify_peers);
1277
1278 static int __dev_open(struct net_device *dev)
1279 {
1280         const struct net_device_ops *ops = dev->netdev_ops;
1281         int ret;
1282
1283         ASSERT_RTNL();
1284
1285         if (!netif_device_present(dev))
1286                 return -ENODEV;
1287
1288         /* Block netpoll from trying to do any rx path servicing.
1289          * If we don't do this there is a chance ndo_poll_controller
1290          * or ndo_poll may be running while we open the device
1291          */
1292         netpoll_poll_disable(dev);
1293
1294         ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1295         ret = notifier_to_errno(ret);
1296         if (ret)
1297                 return ret;
1298
1299         set_bit(__LINK_STATE_START, &dev->state);
1300
1301         if (ops->ndo_validate_addr)
1302                 ret = ops->ndo_validate_addr(dev);
1303
1304         if (!ret && ops->ndo_open)
1305                 ret = ops->ndo_open(dev);
1306
1307         netpoll_poll_enable(dev);
1308
1309         if (ret)
1310                 clear_bit(__LINK_STATE_START, &dev->state);
1311         else {
1312                 dev->flags |= IFF_UP;
1313                 dev_set_rx_mode(dev);
1314                 dev_activate(dev);
1315                 add_device_randomness(dev->dev_addr, dev->addr_len);
1316         }
1317
1318         return ret;
1319 }
1320
1321 /**
1322  *      dev_open        - prepare an interface for use.
1323  *      @dev:   device to open
1324  *
1325  *      Takes a device from down to up state. The device's private open
1326  *      function is invoked and then the multicast lists are loaded. Finally
1327  *      the device is moved into the up state and a %NETDEV_UP message is
1328  *      sent to the netdev notifier chain.
1329  *
1330  *      Calling this function on an active interface is a nop. On a failure
1331  *      a negative errno code is returned.
1332  */
1333 int dev_open(struct net_device *dev)
1334 {
1335         int ret;
1336
1337         if (dev->flags & IFF_UP)
1338                 return 0;
1339
1340         ret = __dev_open(dev);
1341         if (ret < 0)
1342                 return ret;
1343
1344         rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1345         call_netdevice_notifiers(NETDEV_UP, dev);
1346
1347         return ret;
1348 }
1349 EXPORT_SYMBOL(dev_open);
1350
1351 static int __dev_close_many(struct list_head *head)
1352 {
1353         struct net_device *dev;
1354
1355         ASSERT_RTNL();
1356         might_sleep();
1357
1358         list_for_each_entry(dev, head, close_list) {
1359                 /* Temporarily disable netpoll until the interface is down */
1360                 netpoll_poll_disable(dev);
1361
1362                 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1363
1364                 clear_bit(__LINK_STATE_START, &dev->state);
1365
1366                 /* Synchronize to scheduled poll. We cannot touch poll list, it
1367                  * can be even on different cpu. So just clear netif_running().
1368                  *
1369                  * dev->stop() will invoke napi_disable() on all of it's
1370                  * napi_struct instances on this device.
1371                  */
1372                 smp_mb__after_atomic(); /* Commit netif_running(). */
1373         }
1374
1375         dev_deactivate_many(head);
1376
1377         list_for_each_entry(dev, head, close_list) {
1378                 const struct net_device_ops *ops = dev->netdev_ops;
1379
1380                 /*
1381                  *      Call the device specific close. This cannot fail.
1382                  *      Only if device is UP
1383                  *
1384                  *      We allow it to be called even after a DETACH hot-plug
1385                  *      event.
1386                  */
1387                 if (ops->ndo_stop)
1388                         ops->ndo_stop(dev);
1389
1390                 dev->flags &= ~IFF_UP;
1391                 netpoll_poll_enable(dev);
1392         }
1393
1394         return 0;
1395 }
1396
1397 static int __dev_close(struct net_device *dev)
1398 {
1399         int retval;
1400         LIST_HEAD(single);
1401
1402         list_add(&dev->close_list, &single);
1403         retval = __dev_close_many(&single);
1404         list_del(&single);
1405
1406         return retval;
1407 }
1408
1409 int dev_close_many(struct list_head *head, bool unlink)
1410 {
1411         struct net_device *dev, *tmp;
1412
1413         /* Remove the devices that don't need to be closed */
1414         list_for_each_entry_safe(dev, tmp, head, close_list)
1415                 if (!(dev->flags & IFF_UP))
1416                         list_del_init(&dev->close_list);
1417
1418         __dev_close_many(head);
1419
1420         list_for_each_entry_safe(dev, tmp, head, close_list) {
1421                 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1422                 call_netdevice_notifiers(NETDEV_DOWN, dev);
1423                 if (unlink)
1424                         list_del_init(&dev->close_list);
1425         }
1426
1427         return 0;
1428 }
1429 EXPORT_SYMBOL(dev_close_many);
1430
1431 /**
1432  *      dev_close - shutdown an interface.
1433  *      @dev: device to shutdown
1434  *
1435  *      This function moves an active device into down state. A
1436  *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1437  *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1438  *      chain.
1439  */
1440 int dev_close(struct net_device *dev)
1441 {
1442         if (dev->flags & IFF_UP) {
1443                 LIST_HEAD(single);
1444
1445                 list_add(&dev->close_list, &single);
1446                 dev_close_many(&single, true);
1447                 list_del(&single);
1448         }
1449         return 0;
1450 }
1451 EXPORT_SYMBOL(dev_close);
1452
1453
1454 /**
1455  *      dev_disable_lro - disable Large Receive Offload on a device
1456  *      @dev: device
1457  *
1458  *      Disable Large Receive Offload (LRO) on a net device.  Must be
1459  *      called under RTNL.  This is needed if received packets may be
1460  *      forwarded to another interface.
1461  */
1462 void dev_disable_lro(struct net_device *dev)
1463 {
1464         struct net_device *lower_dev;
1465         struct list_head *iter;
1466
1467         dev->wanted_features &= ~NETIF_F_LRO;
1468         netdev_update_features(dev);
1469
1470         if (unlikely(dev->features & NETIF_F_LRO))
1471                 netdev_WARN(dev, "failed to disable LRO!\n");
1472
1473         netdev_for_each_lower_dev(dev, lower_dev, iter)
1474                 dev_disable_lro(lower_dev);
1475 }
1476 EXPORT_SYMBOL(dev_disable_lro);
1477
1478 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1479                                    struct net_device *dev)
1480 {
1481         struct netdev_notifier_info info;
1482
1483         netdev_notifier_info_init(&info, dev);
1484         return nb->notifier_call(nb, val, &info);
1485 }
1486
1487 static int dev_boot_phase = 1;
1488
1489 /**
1490  *      register_netdevice_notifier - register a network notifier block
1491  *      @nb: notifier
1492  *
1493  *      Register a notifier to be called when network device events occur.
1494  *      The notifier passed is linked into the kernel structures and must
1495  *      not be reused until it has been unregistered. A negative errno code
1496  *      is returned on a failure.
1497  *
1498  *      When registered all registration and up events are replayed
1499  *      to the new notifier to allow device to have a race free
1500  *      view of the network device list.
1501  */
1502
1503 int register_netdevice_notifier(struct notifier_block *nb)
1504 {
1505         struct net_device *dev;
1506         struct net_device *last;
1507         struct net *net;
1508         int err;
1509
1510         rtnl_lock();
1511         err = raw_notifier_chain_register(&netdev_chain, nb);
1512         if (err)
1513                 goto unlock;
1514         if (dev_boot_phase)
1515                 goto unlock;
1516         for_each_net(net) {
1517                 for_each_netdev(net, dev) {
1518                         err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1519                         err = notifier_to_errno(err);
1520                         if (err)
1521                                 goto rollback;
1522
1523                         if (!(dev->flags & IFF_UP))
1524                                 continue;
1525
1526                         call_netdevice_notifier(nb, NETDEV_UP, dev);
1527                 }
1528         }
1529
1530 unlock:
1531         rtnl_unlock();
1532         return err;
1533
1534 rollback:
1535         last = dev;
1536         for_each_net(net) {
1537                 for_each_netdev(net, dev) {
1538                         if (dev == last)
1539                                 goto outroll;
1540
1541                         if (dev->flags & IFF_UP) {
1542                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1543                                                         dev);
1544                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1545                         }
1546                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1547                 }
1548         }
1549
1550 outroll:
1551         raw_notifier_chain_unregister(&netdev_chain, nb);
1552         goto unlock;
1553 }
1554 EXPORT_SYMBOL(register_netdevice_notifier);
1555
1556 /**
1557  *      unregister_netdevice_notifier - unregister a network notifier block
1558  *      @nb: notifier
1559  *
1560  *      Unregister a notifier previously registered by
1561  *      register_netdevice_notifier(). The notifier is unlinked into the
1562  *      kernel structures and may then be reused. A negative errno code
1563  *      is returned on a failure.
1564  *
1565  *      After unregistering unregister and down device events are synthesized
1566  *      for all devices on the device list to the removed notifier to remove
1567  *      the need for special case cleanup code.
1568  */
1569
1570 int unregister_netdevice_notifier(struct notifier_block *nb)
1571 {
1572         struct net_device *dev;
1573         struct net *net;
1574         int err;
1575
1576         rtnl_lock();
1577         err = raw_notifier_chain_unregister(&netdev_chain, nb);
1578         if (err)
1579                 goto unlock;
1580
1581         for_each_net(net) {
1582                 for_each_netdev(net, dev) {
1583                         if (dev->flags & IFF_UP) {
1584                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1585                                                         dev);
1586                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1587                         }
1588                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1589                 }
1590         }
1591 unlock:
1592         rtnl_unlock();
1593         return err;
1594 }
1595 EXPORT_SYMBOL(unregister_netdevice_notifier);
1596
1597 /**
1598  *      call_netdevice_notifiers_info - call all network notifier blocks
1599  *      @val: value passed unmodified to notifier function
1600  *      @dev: net_device pointer passed unmodified to notifier function
1601  *      @info: notifier information data
1602  *
1603  *      Call all network notifier blocks.  Parameters and return value
1604  *      are as for raw_notifier_call_chain().
1605  */
1606
1607 static int call_netdevice_notifiers_info(unsigned long val,
1608                                          struct net_device *dev,
1609                                          struct netdev_notifier_info *info)
1610 {
1611         ASSERT_RTNL();
1612         netdev_notifier_info_init(info, dev);
1613         return raw_notifier_call_chain(&netdev_chain, val, info);
1614 }
1615
1616 /**
1617  *      call_netdevice_notifiers - call all network notifier blocks
1618  *      @val: value passed unmodified to notifier function
1619  *      @dev: net_device pointer passed unmodified to notifier function
1620  *
1621  *      Call all network notifier blocks.  Parameters and return value
1622  *      are as for raw_notifier_call_chain().
1623  */
1624
1625 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1626 {
1627         struct netdev_notifier_info info;
1628
1629         return call_netdevice_notifiers_info(val, dev, &info);
1630 }
1631 EXPORT_SYMBOL(call_netdevice_notifiers);
1632
1633 #ifdef CONFIG_NET_CLS_ACT
1634 static struct static_key ingress_needed __read_mostly;
1635
1636 void net_inc_ingress_queue(void)
1637 {
1638         static_key_slow_inc(&ingress_needed);
1639 }
1640 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1641
1642 void net_dec_ingress_queue(void)
1643 {
1644         static_key_slow_dec(&ingress_needed);
1645 }
1646 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1647 #endif
1648
1649 static struct static_key netstamp_needed __read_mostly;
1650 #ifdef HAVE_JUMP_LABEL
1651 /* We are not allowed to call static_key_slow_dec() from irq context
1652  * If net_disable_timestamp() is called from irq context, defer the
1653  * static_key_slow_dec() calls.
1654  */
1655 static atomic_t netstamp_needed_deferred;
1656 #endif
1657
1658 void net_enable_timestamp(void)
1659 {
1660 #ifdef HAVE_JUMP_LABEL
1661         int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1662
1663         if (deferred) {
1664                 while (--deferred)
1665                         static_key_slow_dec(&netstamp_needed);
1666                 return;
1667         }
1668 #endif
1669         static_key_slow_inc(&netstamp_needed);
1670 }
1671 EXPORT_SYMBOL(net_enable_timestamp);
1672
1673 void net_disable_timestamp(void)
1674 {
1675 #ifdef HAVE_JUMP_LABEL
1676         if (in_interrupt()) {
1677                 atomic_inc(&netstamp_needed_deferred);
1678                 return;
1679         }
1680 #endif
1681         static_key_slow_dec(&netstamp_needed);
1682 }
1683 EXPORT_SYMBOL(net_disable_timestamp);
1684
1685 static inline void net_timestamp_set(struct sk_buff *skb)
1686 {
1687         skb->tstamp.tv64 = 0;
1688         if (static_key_false(&netstamp_needed))
1689                 __net_timestamp(skb);
1690 }
1691
1692 #define net_timestamp_check(COND, SKB)                  \
1693         if (static_key_false(&netstamp_needed)) {               \
1694                 if ((COND) && !(SKB)->tstamp.tv64)      \
1695                         __net_timestamp(SKB);           \
1696         }                                               \
1697
1698 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1699 {
1700         unsigned int len;
1701
1702         if (!(dev->flags & IFF_UP))
1703                 return false;
1704
1705         len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1706         if (skb->len <= len)
1707                 return true;
1708
1709         /* if TSO is enabled, we don't care about the length as the packet
1710          * could be forwarded without being segmented before
1711          */
1712         if (skb_is_gso(skb))
1713                 return true;
1714
1715         return false;
1716 }
1717 EXPORT_SYMBOL_GPL(is_skb_forwardable);
1718
1719 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1720 {
1721         if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1722                 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1723                         atomic_long_inc(&dev->rx_dropped);
1724                         kfree_skb(skb);
1725                         return NET_RX_DROP;
1726                 }
1727         }
1728
1729         if (unlikely(!is_skb_forwardable(dev, skb))) {
1730                 atomic_long_inc(&dev->rx_dropped);
1731                 kfree_skb(skb);
1732                 return NET_RX_DROP;
1733         }
1734
1735         skb_scrub_packet(skb, true);
1736         skb->priority = 0;
1737         skb->protocol = eth_type_trans(skb, dev);
1738         skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1739
1740         return 0;
1741 }
1742 EXPORT_SYMBOL_GPL(__dev_forward_skb);
1743
1744 /**
1745  * dev_forward_skb - loopback an skb to another netif
1746  *
1747  * @dev: destination network device
1748  * @skb: buffer to forward
1749  *
1750  * return values:
1751  *      NET_RX_SUCCESS  (no congestion)
1752  *      NET_RX_DROP     (packet was dropped, but freed)
1753  *
1754  * dev_forward_skb can be used for injecting an skb from the
1755  * start_xmit function of one device into the receive queue
1756  * of another device.
1757  *
1758  * The receiving device may be in another namespace, so
1759  * we have to clear all information in the skb that could
1760  * impact namespace isolation.
1761  */
1762 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1763 {
1764         return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1765 }
1766 EXPORT_SYMBOL_GPL(dev_forward_skb);
1767
1768 static inline int deliver_skb(struct sk_buff *skb,
1769                               struct packet_type *pt_prev,
1770                               struct net_device *orig_dev)
1771 {
1772         if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1773                 return -ENOMEM;
1774         atomic_inc(&skb->users);
1775         return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1776 }
1777
1778 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1779                                           struct packet_type **pt,
1780                                           struct net_device *orig_dev,
1781                                           __be16 type,
1782                                           struct list_head *ptype_list)
1783 {
1784         struct packet_type *ptype, *pt_prev = *pt;
1785
1786         list_for_each_entry_rcu(ptype, ptype_list, list) {
1787                 if (ptype->type != type)
1788                         continue;
1789                 if (pt_prev)
1790                         deliver_skb(skb, pt_prev, orig_dev);
1791                 pt_prev = ptype;
1792         }
1793         *pt = pt_prev;
1794 }
1795
1796 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1797 {
1798         if (!ptype->af_packet_priv || !skb->sk)
1799                 return false;
1800
1801         if (ptype->id_match)
1802                 return ptype->id_match(ptype, skb->sk);
1803         else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1804                 return true;
1805
1806         return false;
1807 }
1808
1809 /*
1810  *      Support routine. Sends outgoing frames to any network
1811  *      taps currently in use.
1812  */
1813
1814 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1815 {
1816         struct packet_type *ptype;
1817         struct sk_buff *skb2 = NULL;
1818         struct packet_type *pt_prev = NULL;
1819         struct list_head *ptype_list = &ptype_all;
1820
1821         rcu_read_lock();
1822 again:
1823         list_for_each_entry_rcu(ptype, ptype_list, list) {
1824                 /* Never send packets back to the socket
1825                  * they originated from - MvS (miquels@drinkel.ow.org)
1826                  */
1827                 if (skb_loop_sk(ptype, skb))
1828                         continue;
1829
1830                 if (pt_prev) {
1831                         deliver_skb(skb2, pt_prev, skb->dev);
1832                         pt_prev = ptype;
1833                         continue;
1834                 }
1835
1836                 /* need to clone skb, done only once */
1837                 skb2 = skb_clone(skb, GFP_ATOMIC);
1838                 if (!skb2)
1839                         goto out_unlock;
1840
1841                 net_timestamp_set(skb2);
1842
1843                 /* skb->nh should be correctly
1844                  * set by sender, so that the second statement is
1845                  * just protection against buggy protocols.
1846                  */
1847                 skb_reset_mac_header(skb2);
1848
1849                 if (skb_network_header(skb2) < skb2->data ||
1850                     skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1851                         net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1852                                              ntohs(skb2->protocol),
1853                                              dev->name);
1854                         skb_reset_network_header(skb2);
1855                 }
1856
1857                 skb2->transport_header = skb2->network_header;
1858                 skb2->pkt_type = PACKET_OUTGOING;
1859                 pt_prev = ptype;
1860         }
1861
1862         if (ptype_list == &ptype_all) {
1863                 ptype_list = &dev->ptype_all;
1864                 goto again;
1865         }
1866 out_unlock:
1867         if (pt_prev)
1868                 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1869         rcu_read_unlock();
1870 }
1871
1872 /**
1873  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1874  * @dev: Network device
1875  * @txq: number of queues available
1876  *
1877  * If real_num_tx_queues is changed the tc mappings may no longer be
1878  * valid. To resolve this verify the tc mapping remains valid and if
1879  * not NULL the mapping. With no priorities mapping to this
1880  * offset/count pair it will no longer be used. In the worst case TC0
1881  * is invalid nothing can be done so disable priority mappings. If is
1882  * expected that drivers will fix this mapping if they can before
1883  * calling netif_set_real_num_tx_queues.
1884  */
1885 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1886 {
1887         int i;
1888         struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1889
1890         /* If TC0 is invalidated disable TC mapping */
1891         if (tc->offset + tc->count > txq) {
1892                 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1893                 dev->num_tc = 0;
1894                 return;
1895         }
1896
1897         /* Invalidated prio to tc mappings set to TC0 */
1898         for (i = 1; i < TC_BITMASK + 1; i++) {
1899                 int q = netdev_get_prio_tc_map(dev, i);
1900
1901                 tc = &dev->tc_to_txq[q];
1902                 if (tc->offset + tc->count > txq) {
1903                         pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1904                                 i, q);
1905                         netdev_set_prio_tc_map(dev, i, 0);
1906                 }
1907         }
1908 }
1909
1910 #ifdef CONFIG_XPS
1911 static DEFINE_MUTEX(xps_map_mutex);
1912 #define xmap_dereference(P)             \
1913         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1914
1915 static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1916                                         int cpu, u16 index)
1917 {
1918         struct xps_map *map = NULL;
1919         int pos;
1920
1921         if (dev_maps)
1922                 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1923
1924         for (pos = 0; map && pos < map->len; pos++) {
1925                 if (map->queues[pos] == index) {
1926                         if (map->len > 1) {
1927                                 map->queues[pos] = map->queues[--map->len];
1928                         } else {
1929                                 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1930                                 kfree_rcu(map, rcu);
1931                                 map = NULL;
1932                         }
1933                         break;
1934                 }
1935         }
1936
1937         return map;
1938 }
1939
1940 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1941 {
1942         struct xps_dev_maps *dev_maps;
1943         int cpu, i;
1944         bool active = false;
1945
1946         mutex_lock(&xps_map_mutex);
1947         dev_maps = xmap_dereference(dev->xps_maps);
1948
1949         if (!dev_maps)
1950                 goto out_no_maps;
1951
1952         for_each_possible_cpu(cpu) {
1953                 for (i = index; i < dev->num_tx_queues; i++) {
1954                         if (!remove_xps_queue(dev_maps, cpu, i))
1955                                 break;
1956                 }
1957                 if (i == dev->num_tx_queues)
1958                         active = true;
1959         }
1960
1961         if (!active) {
1962                 RCU_INIT_POINTER(dev->xps_maps, NULL);
1963                 kfree_rcu(dev_maps, rcu);
1964         }
1965
1966         for (i = index; i < dev->num_tx_queues; i++)
1967                 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1968                                              NUMA_NO_NODE);
1969
1970 out_no_maps:
1971         mutex_unlock(&xps_map_mutex);
1972 }
1973
1974 static struct xps_map *expand_xps_map(struct xps_map *map,
1975                                       int cpu, u16 index)
1976 {
1977         struct xps_map *new_map;
1978         int alloc_len = XPS_MIN_MAP_ALLOC;
1979         int i, pos;
1980
1981         for (pos = 0; map && pos < map->len; pos++) {
1982                 if (map->queues[pos] != index)
1983                         continue;
1984                 return map;
1985         }
1986
1987         /* Need to add queue to this CPU's existing map */
1988         if (map) {
1989                 if (pos < map->alloc_len)
1990                         return map;
1991
1992                 alloc_len = map->alloc_len * 2;
1993         }
1994
1995         /* Need to allocate new map to store queue on this CPU's map */
1996         new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1997                                cpu_to_node(cpu));
1998         if (!new_map)
1999                 return NULL;
2000
2001         for (i = 0; i < pos; i++)
2002                 new_map->queues[i] = map->queues[i];
2003         new_map->alloc_len = alloc_len;
2004         new_map->len = pos;
2005
2006         return new_map;
2007 }
2008
2009 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2010                         u16 index)
2011 {
2012         struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2013         struct xps_map *map, *new_map;
2014         int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
2015         int cpu, numa_node_id = -2;
2016         bool active = false;
2017
2018         mutex_lock(&xps_map_mutex);
2019
2020         dev_maps = xmap_dereference(dev->xps_maps);
2021
2022         /* allocate memory for queue storage */
2023         for_each_online_cpu(cpu) {
2024                 if (!cpumask_test_cpu(cpu, mask))
2025                         continue;
2026
2027                 if (!new_dev_maps)
2028                         new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2029                 if (!new_dev_maps) {
2030                         mutex_unlock(&xps_map_mutex);
2031                         return -ENOMEM;
2032                 }
2033
2034                 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2035                                  NULL;
2036
2037                 map = expand_xps_map(map, cpu, index);
2038                 if (!map)
2039                         goto error;
2040
2041                 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2042         }
2043
2044         if (!new_dev_maps)
2045                 goto out_no_new_maps;
2046
2047         for_each_possible_cpu(cpu) {
2048                 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2049                         /* add queue to CPU maps */
2050                         int pos = 0;
2051
2052                         map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2053                         while ((pos < map->len) && (map->queues[pos] != index))
2054                                 pos++;
2055
2056                         if (pos == map->len)
2057                                 map->queues[map->len++] = index;
2058 #ifdef CONFIG_NUMA
2059                         if (numa_node_id == -2)
2060                                 numa_node_id = cpu_to_node(cpu);
2061                         else if (numa_node_id != cpu_to_node(cpu))
2062                                 numa_node_id = -1;
2063 #endif
2064                 } else if (dev_maps) {
2065                         /* fill in the new device map from the old device map */
2066                         map = xmap_dereference(dev_maps->cpu_map[cpu]);
2067                         RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2068                 }
2069
2070         }
2071
2072         rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2073
2074         /* Cleanup old maps */
2075         if (dev_maps) {
2076                 for_each_possible_cpu(cpu) {
2077                         new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2078                         map = xmap_dereference(dev_maps->cpu_map[cpu]);
2079                         if (map && map != new_map)
2080                                 kfree_rcu(map, rcu);
2081                 }
2082
2083                 kfree_rcu(dev_maps, rcu);
2084         }
2085
2086         dev_maps = new_dev_maps;
2087         active = true;
2088
2089 out_no_new_maps:
2090         /* update Tx queue numa node */
2091         netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2092                                      (numa_node_id >= 0) ? numa_node_id :
2093                                      NUMA_NO_NODE);
2094
2095         if (!dev_maps)
2096                 goto out_no_maps;
2097
2098         /* removes queue from unused CPUs */
2099         for_each_possible_cpu(cpu) {
2100                 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2101                         continue;
2102
2103                 if (remove_xps_queue(dev_maps, cpu, index))
2104                         active = true;
2105         }
2106
2107         /* free map if not active */
2108         if (!active) {
2109                 RCU_INIT_POINTER(dev->xps_maps, NULL);
2110                 kfree_rcu(dev_maps, rcu);
2111         }
2112
2113 out_no_maps:
2114         mutex_unlock(&xps_map_mutex);
2115
2116         return 0;
2117 error:
2118         /* remove any maps that we added */
2119         for_each_possible_cpu(cpu) {
2120                 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2121                 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2122                                  NULL;
2123                 if (new_map && new_map != map)
2124                         kfree(new_map);
2125         }
2126
2127         mutex_unlock(&xps_map_mutex);
2128
2129         kfree(new_dev_maps);
2130         return -ENOMEM;
2131 }
2132 EXPORT_SYMBOL(netif_set_xps_queue);
2133
2134 #endif
2135 /*
2136  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2137  * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
2138  */
2139 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2140 {
2141         int rc;
2142
2143         if (txq < 1 || txq > dev->num_tx_queues)
2144                 return -EINVAL;
2145
2146         if (dev->reg_state == NETREG_REGISTERED ||
2147             dev->reg_state == NETREG_UNREGISTERING) {
2148                 ASSERT_RTNL();
2149
2150                 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2151                                                   txq);
2152                 if (rc)
2153                         return rc;
2154
2155                 if (dev->num_tc)
2156                         netif_setup_tc(dev, txq);
2157
2158                 if (txq < dev->real_num_tx_queues) {
2159                         qdisc_reset_all_tx_gt(dev, txq);
2160 #ifdef CONFIG_XPS
2161                         netif_reset_xps_queues_gt(dev, txq);
2162 #endif
2163                 }
2164         }
2165
2166         dev->real_num_tx_queues = txq;
2167         return 0;
2168 }
2169 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2170
2171 #ifdef CONFIG_SYSFS
2172 /**
2173  *      netif_set_real_num_rx_queues - set actual number of RX queues used
2174  *      @dev: Network device
2175  *      @rxq: Actual number of RX queues
2176  *
2177  *      This must be called either with the rtnl_lock held or before
2178  *      registration of the net device.  Returns 0 on success, or a
2179  *      negative error code.  If called before registration, it always
2180  *      succeeds.
2181  */
2182 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2183 {
2184         int rc;
2185
2186         if (rxq < 1 || rxq > dev->num_rx_queues)
2187                 return -EINVAL;
2188
2189         if (dev->reg_state == NETREG_REGISTERED) {
2190                 ASSERT_RTNL();
2191
2192                 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2193                                                   rxq);
2194                 if (rc)
2195                         return rc;
2196         }
2197
2198         dev->real_num_rx_queues = rxq;
2199         return 0;
2200 }
2201 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2202 #endif
2203
2204 /**
2205  * netif_get_num_default_rss_queues - default number of RSS queues
2206  *
2207  * This routine should set an upper limit on the number of RSS queues
2208  * used by default by multiqueue devices.
2209  */
2210 int netif_get_num_default_rss_queues(void)
2211 {
2212         return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2213 }
2214 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2215
2216 static inline void __netif_reschedule(struct Qdisc *q)
2217 {
2218         struct softnet_data *sd;
2219         unsigned long flags;
2220
2221         local_irq_save(flags);
2222         sd = this_cpu_ptr(&softnet_data);
2223         q->next_sched = NULL;
2224         *sd->output_queue_tailp = q;
2225         sd->output_queue_tailp = &q->next_sched;
2226         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2227         local_irq_restore(flags);
2228 }
2229
2230 void __netif_schedule(struct Qdisc *q)
2231 {
2232         if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2233                 __netif_reschedule(q);
2234 }
2235 EXPORT_SYMBOL(__netif_schedule);
2236
2237 struct dev_kfree_skb_cb {
2238         enum skb_free_reason reason;
2239 };
2240
2241 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2242 {
2243         return (struct dev_kfree_skb_cb *)skb->cb;
2244 }
2245
2246 void netif_schedule_queue(struct netdev_queue *txq)
2247 {
2248         rcu_read_lock();
2249         if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2250                 struct Qdisc *q = rcu_dereference(txq->qdisc);
2251
2252                 __netif_schedule(q);
2253         }
2254         rcu_read_unlock();
2255 }
2256 EXPORT_SYMBOL(netif_schedule_queue);
2257
2258 /**
2259  *      netif_wake_subqueue - allow sending packets on subqueue
2260  *      @dev: network device
2261  *      @queue_index: sub queue index
2262  *
2263  * Resume individual transmit queue of a device with multiple transmit queues.
2264  */
2265 void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2266 {
2267         struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2268
2269         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2270                 struct Qdisc *q;
2271
2272                 rcu_read_lock();
2273                 q = rcu_dereference(txq->qdisc);
2274                 __netif_schedule(q);
2275                 rcu_read_unlock();
2276         }
2277 }
2278 EXPORT_SYMBOL(netif_wake_subqueue);
2279
2280 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2281 {
2282         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2283                 struct Qdisc *q;
2284
2285                 rcu_read_lock();
2286                 q = rcu_dereference(dev_queue->qdisc);
2287                 __netif_schedule(q);
2288                 rcu_read_unlock();
2289         }
2290 }
2291 EXPORT_SYMBOL(netif_tx_wake_queue);
2292
2293 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2294 {
2295         unsigned long flags;
2296
2297         if (likely(atomic_read(&skb->users) == 1)) {
2298                 smp_rmb();
2299                 atomic_set(&skb->users, 0);
2300         } else if (likely(!atomic_dec_and_test(&skb->users))) {
2301                 return;
2302         }
2303         get_kfree_skb_cb(skb)->reason = reason;
2304         local_irq_save(flags);
2305         skb->next = __this_cpu_read(softnet_data.completion_queue);
2306         __this_cpu_write(softnet_data.completion_queue, skb);
2307         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2308         local_irq_restore(flags);
2309 }
2310 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2311
2312 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2313 {
2314         if (in_irq() || irqs_disabled())
2315                 __dev_kfree_skb_irq(skb, reason);
2316         else
2317                 dev_kfree_skb(skb);
2318 }
2319 EXPORT_SYMBOL(__dev_kfree_skb_any);
2320
2321
2322 /**
2323  * netif_device_detach - mark device as removed
2324  * @dev: network device
2325  *
2326  * Mark device as removed from system and therefore no longer available.
2327  */
2328 void netif_device_detach(struct net_device *dev)
2329 {
2330         if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2331             netif_running(dev)) {
2332                 netif_tx_stop_all_queues(dev);
2333         }
2334 }
2335 EXPORT_SYMBOL(netif_device_detach);
2336
2337 /**
2338  * netif_device_attach - mark device as attached
2339  * @dev: network device
2340  *
2341  * Mark device as attached from system and restart if needed.
2342  */
2343 void netif_device_attach(struct net_device *dev)
2344 {
2345         if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2346             netif_running(dev)) {
2347                 netif_tx_wake_all_queues(dev);
2348                 __netdev_watchdog_up(dev);
2349         }
2350 }
2351 EXPORT_SYMBOL(netif_device_attach);
2352
2353 static void skb_warn_bad_offload(const struct sk_buff *skb)
2354 {
2355         static const netdev_features_t null_features = 0;
2356         struct net_device *dev = skb->dev;
2357         const char *driver = "";
2358
2359         if (!net_ratelimit())
2360                 return;
2361
2362         if (dev && dev->dev.parent)
2363                 driver = dev_driver_string(dev->dev.parent);
2364
2365         WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2366              "gso_type=%d ip_summed=%d\n",
2367              driver, dev ? &dev->features : &null_features,
2368              skb->sk ? &skb->sk->sk_route_caps : &null_features,
2369              skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2370              skb_shinfo(skb)->gso_type, skb->ip_summed);
2371 }
2372
2373 /*
2374  * Invalidate hardware checksum when packet is to be mangled, and
2375  * complete checksum manually on outgoing path.
2376  */
2377 int skb_checksum_help(struct sk_buff *skb)
2378 {
2379         __wsum csum;
2380         int ret = 0, offset;
2381
2382         if (skb->ip_summed == CHECKSUM_COMPLETE)
2383                 goto out_set_summed;
2384
2385         if (unlikely(skb_shinfo(skb)->gso_size)) {
2386                 skb_warn_bad_offload(skb);
2387                 return -EINVAL;
2388         }
2389
2390         /* Before computing a checksum, we should make sure no frag could
2391          * be modified by an external entity : checksum could be wrong.
2392          */
2393         if (skb_has_shared_frag(skb)) {
2394                 ret = __skb_linearize(skb);
2395                 if (ret)
2396                         goto out;
2397         }
2398
2399         offset = skb_checksum_start_offset(skb);
2400         BUG_ON(offset >= skb_headlen(skb));
2401         csum = skb_checksum(skb, offset, skb->len - offset, 0);
2402
2403         offset += skb->csum_offset;
2404         BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2405
2406         if (skb_cloned(skb) &&
2407             !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2408                 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2409                 if (ret)
2410                         goto out;
2411         }
2412
2413         *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2414 out_set_summed:
2415         skb->ip_summed = CHECKSUM_NONE;
2416 out:
2417         return ret;
2418 }
2419 EXPORT_SYMBOL(skb_checksum_help);
2420
2421 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2422 {
2423         __be16 type = skb->protocol;
2424
2425         /* Tunnel gso handlers can set protocol to ethernet. */
2426         if (type == htons(ETH_P_TEB)) {
2427                 struct ethhdr *eth;
2428
2429                 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2430                         return 0;
2431
2432                 eth = (struct ethhdr *)skb_mac_header(skb);
2433                 type = eth->h_proto;
2434         }
2435
2436         return __vlan_get_protocol(skb, type, depth);
2437 }
2438
2439 /**
2440  *      skb_mac_gso_segment - mac layer segmentation handler.
2441  *      @skb: buffer to segment
2442  *      @features: features for the output path (see dev->features)
2443  */
2444 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2445                                     netdev_features_t features)
2446 {
2447         struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2448         struct packet_offload *ptype;
2449         int vlan_depth = skb->mac_len;
2450         __be16 type = skb_network_protocol(skb, &vlan_depth);
2451
2452         if (unlikely(!type))
2453                 return ERR_PTR(-EINVAL);
2454
2455         __skb_pull(skb, vlan_depth);
2456
2457         rcu_read_lock();
2458         list_for_each_entry_rcu(ptype, &offload_base, list) {
2459                 if (ptype->type == type && ptype->callbacks.gso_segment) {
2460                         segs = ptype->callbacks.gso_segment(skb, features);
2461                         break;
2462                 }
2463         }
2464         rcu_read_unlock();
2465
2466         __skb_push(skb, skb->data - skb_mac_header(skb));
2467
2468         return segs;
2469 }
2470 EXPORT_SYMBOL(skb_mac_gso_segment);
2471
2472
2473 /* openvswitch calls this on rx path, so we need a different check.
2474  */
2475 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2476 {
2477         if (tx_path)
2478                 return skb->ip_summed != CHECKSUM_PARTIAL;
2479         else
2480                 return skb->ip_summed == CHECKSUM_NONE;
2481 }
2482
2483 /**
2484  *      __skb_gso_segment - Perform segmentation on skb.
2485  *      @skb: buffer to segment
2486  *      @features: features for the output path (see dev->features)
2487  *      @tx_path: whether it is called in TX path
2488  *
2489  *      This function segments the given skb and returns a list of segments.
2490  *
2491  *      It may return NULL if the skb requires no segmentation.  This is
2492  *      only possible when GSO is used for verifying header integrity.
2493  */
2494 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2495                                   netdev_features_t features, bool tx_path)
2496 {
2497         if (unlikely(skb_needs_check(skb, tx_path))) {
2498                 int err;
2499
2500                 skb_warn_bad_offload(skb);
2501
2502                 err = skb_cow_head(skb, 0);
2503                 if (err < 0)
2504                         return ERR_PTR(err);
2505         }
2506
2507         SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2508         SKB_GSO_CB(skb)->encap_level = 0;
2509
2510         skb_reset_mac_header(skb);
2511         skb_reset_mac_len(skb);
2512
2513         return skb_mac_gso_segment(skb, features);
2514 }
2515 EXPORT_SYMBOL(__skb_gso_segment);
2516
2517 /* Take action when hardware reception checksum errors are detected. */
2518 #ifdef CONFIG_BUG
2519 void netdev_rx_csum_fault(struct net_device *dev)
2520 {
2521         if (net_ratelimit()) {
2522                 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2523                 dump_stack();
2524         }
2525 }
2526 EXPORT_SYMBOL(netdev_rx_csum_fault);
2527 #endif
2528
2529 /* Actually, we should eliminate this check as soon as we know, that:
2530  * 1. IOMMU is present and allows to map all the memory.
2531  * 2. No high memory really exists on this machine.
2532  */
2533
2534 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2535 {
2536 #ifdef CONFIG_HIGHMEM
2537         int i;
2538         if (!(dev->features & NETIF_F_HIGHDMA)) {
2539                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2540                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2541                         if (PageHighMem(skb_frag_page(frag)))
2542                                 return 1;
2543                 }
2544         }
2545
2546         if (PCI_DMA_BUS_IS_PHYS) {
2547                 struct device *pdev = dev->dev.parent;
2548
2549                 if (!pdev)
2550                         return 0;
2551                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2552                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2553                         dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2554                         if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2555                                 return 1;
2556                 }
2557         }
2558 #endif
2559         return 0;
2560 }
2561
2562 /* If MPLS offload request, verify we are testing hardware MPLS features
2563  * instead of standard features for the netdev.
2564  */
2565 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2566 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2567                                            netdev_features_t features,
2568                                            __be16 type)
2569 {
2570         if (eth_p_mpls(type))
2571                 features &= skb->dev->mpls_features;
2572
2573         return features;
2574 }
2575 #else
2576 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2577                                            netdev_features_t features,
2578                                            __be16 type)
2579 {
2580         return features;
2581 }
2582 #endif
2583
2584 static netdev_features_t harmonize_features(struct sk_buff *skb,
2585         netdev_features_t features)
2586 {
2587         int tmp;
2588         __be16 type;
2589
2590         type = skb_network_protocol(skb, &tmp);
2591         features = net_mpls_features(skb, features, type);
2592
2593         if (skb->ip_summed != CHECKSUM_NONE &&
2594             !can_checksum_protocol(features, type)) {
2595                 features &= ~NETIF_F_ALL_CSUM;
2596         } else if (illegal_highdma(skb->dev, skb)) {
2597                 features &= ~NETIF_F_SG;
2598         }
2599
2600         return features;
2601 }
2602
2603 netdev_features_t passthru_features_check(struct sk_buff *skb,
2604                                           struct net_device *dev,
2605                                           netdev_features_t features)
2606 {
2607         return features;
2608 }
2609 EXPORT_SYMBOL(passthru_features_check);
2610
2611 static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2612                                              struct net_device *dev,
2613                                              netdev_features_t features)
2614 {
2615         return vlan_features_check(skb, features);
2616 }
2617
2618 netdev_features_t netif_skb_features(struct sk_buff *skb)
2619 {
2620         struct net_device *dev = skb->dev;
2621         netdev_features_t features = dev->features;
2622         u16 gso_segs = skb_shinfo(skb)->gso_segs;
2623
2624         if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2625                 features &= ~NETIF_F_GSO_MASK;
2626
2627         /* If encapsulation offload request, verify we are testing
2628          * hardware encapsulation features instead of standard
2629          * features for the netdev
2630          */
2631         if (skb->encapsulation)
2632                 features &= dev->hw_enc_features;
2633
2634         if (skb_vlan_tagged(skb))
2635                 features = netdev_intersect_features(features,
2636                                                      dev->vlan_features |
2637                                                      NETIF_F_HW_VLAN_CTAG_TX |
2638                                                      NETIF_F_HW_VLAN_STAG_TX);
2639
2640         if (dev->netdev_ops->ndo_features_check)
2641                 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2642                                                                 features);
2643         else
2644                 features &= dflt_features_check(skb, dev, features);
2645
2646         return harmonize_features(skb, features);
2647 }
2648 EXPORT_SYMBOL(netif_skb_features);
2649
2650 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2651                     struct netdev_queue *txq, bool more)
2652 {
2653         unsigned int len;
2654         int rc;
2655
2656         if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2657                 dev_queue_xmit_nit(skb, dev);
2658
2659         len = skb->len;
2660         trace_net_dev_start_xmit(skb, dev);
2661         rc = netdev_start_xmit(skb, dev, txq, more);
2662         trace_net_dev_xmit(skb, rc, dev, len);
2663
2664         return rc;
2665 }
2666
2667 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2668                                     struct netdev_queue *txq, int *ret)
2669 {
2670         struct sk_buff *skb = first;
2671         int rc = NETDEV_TX_OK;
2672
2673         while (skb) {
2674                 struct sk_buff *next = skb->next;
2675
2676                 skb->next = NULL;
2677                 rc = xmit_one(skb, dev, txq, next != NULL);
2678                 if (unlikely(!dev_xmit_complete(rc))) {
2679                         skb->next = next;
2680                         goto out;
2681                 }
2682
2683                 skb = next;
2684                 if (netif_xmit_stopped(txq) && skb) {
2685                         rc = NETDEV_TX_BUSY;
2686                         break;
2687                 }
2688         }
2689
2690 out:
2691         *ret = rc;
2692         return skb;
2693 }
2694
2695 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2696                                           netdev_features_t features)
2697 {
2698         if (skb_vlan_tag_present(skb) &&
2699             !vlan_hw_offload_capable(features, skb->vlan_proto))
2700                 skb = __vlan_hwaccel_push_inside(skb);
2701         return skb;
2702 }
2703
2704 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2705 {
2706         netdev_features_t features;
2707
2708         if (skb->next)
2709                 return skb;
2710
2711         features = netif_skb_features(skb);
2712         skb = validate_xmit_vlan(skb, features);
2713         if (unlikely(!skb))
2714                 goto out_null;
2715
2716         if (netif_needs_gso(skb, features)) {
2717                 struct sk_buff *segs;
2718
2719                 segs = skb_gso_segment(skb, features);
2720                 if (IS_ERR(segs)) {
2721                         goto out_kfree_skb;
2722                 } else if (segs) {
2723                         consume_skb(skb);
2724                         skb = segs;
2725                 }
2726         } else {
2727                 if (skb_needs_linearize(skb, features) &&
2728                     __skb_linearize(skb))
2729                         goto out_kfree_skb;
2730
2731                 /* If packet is not checksummed and device does not
2732                  * support checksumming for this protocol, complete
2733                  * checksumming here.
2734                  */
2735                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2736                         if (skb->encapsulation)
2737                                 skb_set_inner_transport_header(skb,
2738                                                                skb_checksum_start_offset(skb));
2739                         else
2740                                 skb_set_transport_header(skb,
2741                                                          skb_checksum_start_offset(skb));
2742                         if (!(features & NETIF_F_ALL_CSUM) &&
2743                             skb_checksum_help(skb))
2744                                 goto out_kfree_skb;
2745                 }
2746         }
2747
2748         return skb;
2749
2750 out_kfree_skb:
2751         kfree_skb(skb);
2752 out_null:
2753         return NULL;
2754 }
2755
2756 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2757 {
2758         struct sk_buff *next, *head = NULL, *tail;
2759
2760         for (; skb != NULL; skb = next) {
2761                 next = skb->next;
2762                 skb->next = NULL;
2763
2764                 /* in case skb wont be segmented, point to itself */
2765                 skb->prev = skb;
2766
2767                 skb = validate_xmit_skb(skb, dev);
2768                 if (!skb)
2769                         continue;
2770
2771                 if (!head)
2772                         head = skb;
2773                 else
2774                         tail->next = skb;
2775                 /* If skb was segmented, skb->prev points to
2776                  * the last segment. If not, it still contains skb.
2777                  */
2778                 tail = skb->prev;
2779         }
2780         return head;
2781 }
2782
2783 static void qdisc_pkt_len_init(struct sk_buff *skb)
2784 {
2785         const struct skb_shared_info *shinfo = skb_shinfo(skb);
2786
2787         qdisc_skb_cb(skb)->pkt_len = skb->len;
2788
2789         /* To get more precise estimation of bytes sent on wire,
2790          * we add to pkt_len the headers size of all segments
2791          */
2792         if (shinfo->gso_size)  {
2793                 unsigned int hdr_len;
2794                 u16 gso_segs = shinfo->gso_segs;
2795
2796                 /* mac layer + network layer */
2797                 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2798
2799                 /* + transport layer */
2800                 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2801                         hdr_len += tcp_hdrlen(skb);
2802                 else
2803                         hdr_len += sizeof(struct udphdr);
2804
2805                 if (shinfo->gso_type & SKB_GSO_DODGY)
2806                         gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2807                                                 shinfo->gso_size);
2808
2809                 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
2810         }
2811 }
2812
2813 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2814                                  struct net_device *dev,
2815                                  struct netdev_queue *txq)
2816 {
2817         spinlock_t *root_lock = qdisc_lock(q);
2818         bool contended;
2819         int rc;
2820
2821         qdisc_pkt_len_init(skb);
2822         qdisc_calculate_pkt_len(skb, q);
2823         /*
2824          * Heuristic to force contended enqueues to serialize on a
2825          * separate lock before trying to get qdisc main lock.
2826          * This permits __QDISC___STATE_RUNNING owner to get the lock more
2827          * often and dequeue packets faster.
2828          */
2829         contended = qdisc_is_running(q);
2830         if (unlikely(contended))
2831                 spin_lock(&q->busylock);
2832
2833         spin_lock(root_lock);
2834         if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2835                 kfree_skb(skb);
2836                 rc = NET_XMIT_DROP;
2837         } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2838                    qdisc_run_begin(q)) {
2839                 /*
2840                  * This is a work-conserving queue; there are no old skbs
2841                  * waiting to be sent out; and the qdisc is not running -
2842                  * xmit the skb directly.
2843                  */
2844
2845                 qdisc_bstats_update(q, skb);
2846
2847                 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
2848                         if (unlikely(contended)) {
2849                                 spin_unlock(&q->busylock);
2850                                 contended = false;
2851                         }
2852                         __qdisc_run(q);
2853                 } else
2854                         qdisc_run_end(q);
2855
2856                 rc = NET_XMIT_SUCCESS;
2857         } else {
2858                 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2859                 if (qdisc_run_begin(q)) {
2860                         if (unlikely(contended)) {
2861                                 spin_unlock(&q->busylock);
2862                                 contended = false;
2863                         }
2864                         __qdisc_run(q);
2865                 }
2866         }
2867         spin_unlock(root_lock);
2868         if (unlikely(contended))
2869                 spin_unlock(&q->busylock);
2870         return rc;
2871 }
2872
2873 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2874 static void skb_update_prio(struct sk_buff *skb)
2875 {
2876         struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2877
2878         if (!skb->priority && skb->sk && map) {
2879                 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2880
2881                 if (prioidx < map->priomap_len)
2882                         skb->priority = map->priomap[prioidx];
2883         }
2884 }
2885 #else
2886 #define skb_update_prio(skb)
2887 #endif
2888
2889 DEFINE_PER_CPU(int, xmit_recursion);
2890 EXPORT_SYMBOL(xmit_recursion);
2891
2892 #define RECURSION_LIMIT 10
2893
2894 /**
2895  *      dev_loopback_xmit - loop back @skb
2896  *      @skb: buffer to transmit
2897  */
2898 int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
2899 {
2900         skb_reset_mac_header(skb);
2901         __skb_pull(skb, skb_network_offset(skb));
2902         skb->pkt_type = PACKET_LOOPBACK;
2903         skb->ip_summed = CHECKSUM_UNNECESSARY;
2904         WARN_ON(!skb_dst(skb));
2905         skb_dst_force(skb);
2906         netif_rx_ni(skb);
2907         return 0;
2908 }
2909 EXPORT_SYMBOL(dev_loopback_xmit);
2910
2911 /**
2912  *      __dev_queue_xmit - transmit a buffer
2913  *      @skb: buffer to transmit
2914  *      @accel_priv: private data used for L2 forwarding offload
2915  *
2916  *      Queue a buffer for transmission to a network device. The caller must
2917  *      have set the device and priority and built the buffer before calling
2918  *      this function. The function can be called from an interrupt.
2919  *
2920  *      A negative errno code is returned on a failure. A success does not
2921  *      guarantee the frame will be transmitted as it may be dropped due
2922  *      to congestion or traffic shaping.
2923  *
2924  * -----------------------------------------------------------------------------------
2925  *      I notice this method can also return errors from the queue disciplines,
2926  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2927  *      be positive.
2928  *
2929  *      Regardless of the return value, the skb is consumed, so it is currently
2930  *      difficult to retry a send to this method.  (You can bump the ref count
2931  *      before sending to hold a reference for retry if you are careful.)
2932  *
2933  *      When calling this method, interrupts MUST be enabled.  This is because
2934  *      the BH enable code must have IRQs enabled so that it will not deadlock.
2935  *          --BLG
2936  */
2937 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2938 {
2939         struct net_device *dev = skb->dev;
2940         struct netdev_queue *txq;
2941         struct Qdisc *q;
2942         int rc = -ENOMEM;
2943
2944         skb_reset_mac_header(skb);
2945
2946         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
2947                 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
2948
2949         /* Disable soft irqs for various locks below. Also
2950          * stops preemption for RCU.
2951          */
2952         rcu_read_lock_bh();
2953
2954         skb_update_prio(skb);
2955
2956         /* If device/qdisc don't need skb->dst, release it right now while
2957          * its hot in this cpu cache.
2958          */
2959         if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2960                 skb_dst_drop(skb);
2961         else
2962                 skb_dst_force(skb);
2963
2964         txq = netdev_pick_tx(dev, skb, accel_priv);
2965         q = rcu_dereference_bh(txq->qdisc);
2966
2967 #ifdef CONFIG_NET_CLS_ACT
2968         skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2969 #endif
2970         trace_net_dev_queue(skb);
2971         if (q->enqueue) {
2972                 rc = __dev_xmit_skb(skb, q, dev, txq);
2973                 goto out;
2974         }
2975
2976         /* The device has no queue. Common case for software devices:
2977            loopback, all the sorts of tunnels...
2978
2979            Really, it is unlikely that netif_tx_lock protection is necessary
2980            here.  (f.e. loopback and IP tunnels are clean ignoring statistics
2981            counters.)
2982            However, it is possible, that they rely on protection
2983            made by us here.
2984
2985            Check this and shot the lock. It is not prone from deadlocks.
2986            Either shot noqueue qdisc, it is even simpler 8)
2987          */
2988         if (dev->flags & IFF_UP) {
2989                 int cpu = smp_processor_id(); /* ok because BHs are off */
2990
2991                 if (txq->xmit_lock_owner != cpu) {
2992
2993                         if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2994                                 goto recursion_alert;
2995
2996                         skb = validate_xmit_skb(skb, dev);
2997                         if (!skb)
2998                                 goto drop;
2999
3000                         HARD_TX_LOCK(dev, txq, cpu);
3001
3002                         if (!netif_xmit_stopped(txq)) {
3003                                 __this_cpu_inc(xmit_recursion);
3004                                 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3005                                 __this_cpu_dec(xmit_recursion);
3006                                 if (dev_xmit_complete(rc)) {
3007                                         HARD_TX_UNLOCK(dev, txq);
3008                                         goto out;
3009                                 }
3010                         }
3011                         HARD_TX_UNLOCK(dev, txq);
3012                         net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3013                                              dev->name);
3014                 } else {
3015                         /* Recursion is detected! It is possible,
3016                          * unfortunately
3017                          */
3018 recursion_alert:
3019                         net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3020                                              dev->name);
3021                 }
3022         }
3023
3024         rc = -ENETDOWN;
3025 drop:
3026         rcu_read_unlock_bh();
3027
3028         atomic_long_inc(&dev->tx_dropped);
3029         kfree_skb_list(skb);
3030         return rc;
3031 out:
3032         rcu_read_unlock_bh();
3033         return rc;
3034 }
3035
3036 int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
3037 {
3038         return __dev_queue_xmit(skb, NULL);
3039 }
3040 EXPORT_SYMBOL(dev_queue_xmit_sk);
3041
3042 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3043 {
3044         return __dev_queue_xmit(skb, accel_priv);
3045 }
3046 EXPORT_SYMBOL(dev_queue_xmit_accel);
3047
3048
3049 /*=======================================================================
3050                         Receiver routines
3051   =======================================================================*/
3052
3053 int netdev_max_backlog __read_mostly = 1000;
3054 EXPORT_SYMBOL(netdev_max_backlog);
3055
3056 int netdev_tstamp_prequeue __read_mostly = 1;
3057 int netdev_budget __read_mostly = 300;
3058 int weight_p __read_mostly = 64;            /* old backlog weight */
3059
3060 /* Called with irq disabled */
3061 static inline void ____napi_schedule(struct softnet_data *sd,
3062                                      struct napi_struct *napi)
3063 {
3064         list_add_tail(&napi->poll_list, &sd->poll_list);
3065         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3066 }
3067
3068 #ifdef CONFIG_RPS
3069
3070 /* One global table that all flow-based protocols share. */
3071 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3072 EXPORT_SYMBOL(rps_sock_flow_table);
3073 u32 rps_cpu_mask __read_mostly;
3074 EXPORT_SYMBOL(rps_cpu_mask);
3075
3076 struct static_key rps_needed __read_mostly;
3077
3078 static struct rps_dev_flow *
3079 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3080             struct rps_dev_flow *rflow, u16 next_cpu)
3081 {
3082         if (next_cpu < nr_cpu_ids) {
3083 #ifdef CONFIG_RFS_ACCEL
3084                 struct netdev_rx_queue *rxqueue;
3085                 struct rps_dev_flow_table *flow_table;
3086                 struct rps_dev_flow *old_rflow;
3087                 u32 flow_id;
3088                 u16 rxq_index;
3089                 int rc;
3090
3091                 /* Should we steer this flow to a different hardware queue? */
3092                 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3093                     !(dev->features & NETIF_F_NTUPLE))
3094                         goto out;
3095                 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3096                 if (rxq_index == skb_get_rx_queue(skb))
3097                         goto out;
3098
3099                 rxqueue = dev->_rx + rxq_index;
3100                 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3101                 if (!flow_table)
3102                         goto out;
3103                 flow_id = skb_get_hash(skb) & flow_table->mask;
3104                 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3105                                                         rxq_index, flow_id);
3106                 if (rc < 0)
3107                         goto out;
3108                 old_rflow = rflow;
3109                 rflow = &flow_table->flows[flow_id];
3110                 rflow->filter = rc;
3111                 if (old_rflow->filter == rflow->filter)
3112                         old_rflow->filter = RPS_NO_FILTER;
3113         out:
3114 #endif
3115                 rflow->last_qtail =
3116                         per_cpu(softnet_data, next_cpu).input_queue_head;
3117         }
3118
3119         rflow->cpu = next_cpu;
3120         return rflow;
3121 }
3122
3123 /*
3124  * get_rps_cpu is called from netif_receive_skb and returns the target
3125  * CPU from the RPS map of the receiving queue for a given skb.
3126  * rcu_read_lock must be held on entry.
3127  */
3128 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3129                        struct rps_dev_flow **rflowp)
3130 {
3131         const struct rps_sock_flow_table *sock_flow_table;
3132         struct netdev_rx_queue *rxqueue = dev->_rx;
3133         struct rps_dev_flow_table *flow_table;
3134         struct rps_map *map;
3135         int cpu = -1;
3136         u32 tcpu;
3137         u32 hash;
3138
3139         if (skb_rx_queue_recorded(skb)) {
3140                 u16 index = skb_get_rx_queue(skb);
3141
3142                 if (unlikely(index >= dev->real_num_rx_queues)) {
3143                         WARN_ONCE(dev->real_num_rx_queues > 1,
3144                                   "%s received packet on queue %u, but number "
3145                                   "of RX queues is %u\n",
3146                                   dev->name, index, dev->real_num_rx_queues);
3147                         goto done;
3148                 }
3149                 rxqueue += index;
3150         }
3151
3152         /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3153
3154         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3155         map = rcu_dereference(rxqueue->rps_map);
3156         if (!flow_table && !map)
3157                 goto done;
3158
3159         skb_reset_network_header(skb);
3160         hash = skb_get_hash(skb);
3161         if (!hash)
3162                 goto done;
3163
3164         sock_flow_table = rcu_dereference(rps_sock_flow_table);
3165         if (flow_table && sock_flow_table) {
3166                 struct rps_dev_flow *rflow;
3167                 u32 next_cpu;
3168                 u32 ident;
3169
3170                 /* First check into global flow table if there is a match */
3171                 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3172                 if ((ident ^ hash) & ~rps_cpu_mask)
3173                         goto try_rps;
3174
3175                 next_cpu = ident & rps_cpu_mask;
3176
3177                 /* OK, now we know there is a match,
3178                  * we can look at the local (per receive queue) flow table
3179                  */
3180                 rflow = &flow_table->flows[hash & flow_table->mask];
3181                 tcpu = rflow->cpu;
3182
3183                 /*
3184                  * If the desired CPU (where last recvmsg was done) is
3185                  * different from current CPU (one in the rx-queue flow
3186                  * table entry), switch if one of the following holds:
3187                  *   - Current CPU is unset (>= nr_cpu_ids).
3188                  *   - Current CPU is offline.
3189                  *   - The current CPU's queue tail has advanced beyond the
3190                  *     last packet that was enqueued using this table entry.
3191                  *     This guarantees that all previous packets for the flow
3192                  *     have been dequeued, thus preserving in order delivery.
3193                  */
3194                 if (unlikely(tcpu != next_cpu) &&
3195                     (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
3196                      ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3197                       rflow->last_qtail)) >= 0)) {
3198                         tcpu = next_cpu;
3199                         rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3200                 }
3201
3202                 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
3203                         *rflowp = rflow;
3204                         cpu = tcpu;
3205                         goto done;
3206                 }
3207         }
3208
3209 try_rps:
3210
3211         if (map) {
3212                 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3213                 if (cpu_online(tcpu)) {
3214                         cpu = tcpu;
3215                         goto done;
3216                 }
3217         }
3218
3219 done:
3220         return cpu;
3221 }
3222
3223 #ifdef CONFIG_RFS_ACCEL
3224
3225 /**
3226  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3227  * @dev: Device on which the filter was set
3228  * @rxq_index: RX queue index
3229  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3230  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3231  *
3232  * Drivers that implement ndo_rx_flow_steer() should periodically call
3233  * this function for each installed filter and remove the filters for
3234  * which it returns %true.
3235  */
3236 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3237                          u32 flow_id, u16 filter_id)
3238 {
3239         struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3240         struct rps_dev_flow_table *flow_table;
3241         struct rps_dev_flow *rflow;
3242         bool expire = true;
3243         unsigned int cpu;
3244
3245         rcu_read_lock();
3246         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3247         if (flow_table && flow_id <= flow_table->mask) {
3248                 rflow = &flow_table->flows[flow_id];
3249                 cpu = ACCESS_ONCE(rflow->cpu);
3250                 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
3251                     ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3252                            rflow->last_qtail) <
3253                      (int)(10 * flow_table->mask)))
3254                         expire = false;
3255         }
3256         rcu_read_unlock();
3257         return expire;
3258 }
3259 EXPORT_SYMBOL(rps_may_expire_flow);
3260
3261 #endif /* CONFIG_RFS_ACCEL */
3262
3263 /* Called from hardirq (IPI) context */
3264 static void rps_trigger_softirq(void *data)
3265 {
3266         struct softnet_data *sd = data;
3267
3268         ____napi_schedule(sd, &sd->backlog);
3269         sd->received_rps++;
3270 }
3271
3272 #endif /* CONFIG_RPS */
3273
3274 /*
3275  * Check if this softnet_data structure is another cpu one
3276  * If yes, queue it to our IPI list and return 1
3277  * If no, return 0
3278  */
3279 static int rps_ipi_queued(struct softnet_data *sd)
3280 {
3281 #ifdef CONFIG_RPS
3282         struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3283
3284         if (sd != mysd) {
3285                 sd->rps_ipi_next = mysd->rps_ipi_list;
3286                 mysd->rps_ipi_list = sd;
3287
3288                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3289                 return 1;
3290         }
3291 #endif /* CONFIG_RPS */
3292         return 0;
3293 }
3294
3295 #ifdef CONFIG_NET_FLOW_LIMIT
3296 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3297 #endif
3298
3299 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3300 {
3301 #ifdef CONFIG_NET_FLOW_LIMIT
3302         struct sd_flow_limit *fl;
3303         struct softnet_data *sd;
3304         unsigned int old_flow, new_flow;
3305
3306         if (qlen < (netdev_max_backlog >> 1))
3307                 return false;
3308
3309         sd = this_cpu_ptr(&softnet_data);
3310
3311         rcu_read_lock();
3312         fl = rcu_dereference(sd->flow_limit);
3313         if (fl) {
3314                 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3315                 old_flow = fl->history[fl->history_head];
3316                 fl->history[fl->history_head] = new_flow;
3317
3318                 fl->history_head++;
3319                 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3320
3321                 if (likely(fl->buckets[old_flow]))
3322                         fl->buckets[old_flow]--;
3323
3324                 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3325                         fl->count++;
3326                         rcu_read_unlock();
3327                         return true;
3328                 }
3329         }
3330         rcu_read_unlock();
3331 #endif
3332         return false;
3333 }
3334
3335 /*
3336  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3337  * queue (may be a remote CPU queue).
3338  */
3339 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3340                               unsigned int *qtail)
3341 {
3342         struct softnet_data *sd;
3343         unsigned long flags;
3344         unsigned int qlen;
3345
3346         sd = &per_cpu(softnet_data, cpu);
3347
3348         local_irq_save(flags);
3349
3350         rps_lock(sd);
3351         qlen = skb_queue_len(&sd->input_pkt_queue);
3352         if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3353                 if (qlen) {
3354 enqueue:
3355                         __skb_queue_tail(&sd->input_pkt_queue, skb);
3356                         input_queue_tail_incr_save(sd, qtail);
3357                         rps_unlock(sd);
3358                         local_irq_restore(flags);
3359                         return NET_RX_SUCCESS;
3360                 }
3361
3362                 /* Schedule NAPI for backlog device
3363                  * We can use non atomic operation since we own the queue lock
3364                  */
3365                 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3366                         if (!rps_ipi_queued(sd))
3367                                 ____napi_schedule(sd, &sd->backlog);
3368                 }
3369                 goto enqueue;
3370         }
3371
3372         sd->dropped++;
3373         rps_unlock(sd);
3374
3375         local_irq_restore(flags);
3376
3377         atomic_long_inc(&skb->dev->rx_dropped);
3378         kfree_skb(skb);
3379         return NET_RX_DROP;
3380 }
3381
3382 static int netif_rx_internal(struct sk_buff *skb)
3383 {
3384         int ret;
3385
3386         net_timestamp_check(netdev_tstamp_prequeue, skb);
3387
3388         trace_netif_rx(skb);
3389 #ifdef CONFIG_RPS
3390         if (static_key_false(&rps_needed)) {
3391                 struct rps_dev_flow voidflow, *rflow = &voidflow;
3392                 int cpu;
3393
3394                 preempt_disable();
3395                 rcu_read_lock();
3396
3397                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3398                 if (cpu < 0)
3399                         cpu = smp_processor_id();
3400
3401                 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3402
3403                 rcu_read_unlock();
3404                 preempt_enable();
3405         } else
3406 #endif
3407         {
3408                 unsigned int qtail;
3409                 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3410                 put_cpu();
3411         }
3412         return ret;
3413 }
3414
3415 /**
3416  *      netif_rx        -       post buffer to the network code
3417  *      @skb: buffer to post
3418  *
3419  *      This function receives a packet from a device driver and queues it for
3420  *      the upper (protocol) levels to process.  It always succeeds. The buffer
3421  *      may be dropped during processing for congestion control or by the
3422  *      protocol layers.
3423  *
3424  *      return values:
3425  *      NET_RX_SUCCESS  (no congestion)
3426  *      NET_RX_DROP     (packet was dropped)
3427  *
3428  */
3429
3430 int netif_rx(struct sk_buff *skb)
3431 {
3432         trace_netif_rx_entry(skb);
3433
3434         return netif_rx_internal(skb);
3435 }
3436 EXPORT_SYMBOL(netif_rx);
3437
3438 int netif_rx_ni(struct sk_buff *skb)
3439 {
3440         int err;
3441
3442         trace_netif_rx_ni_entry(skb);
3443
3444         preempt_disable();
3445         err = netif_rx_internal(skb);
3446         if (local_softirq_pending())
3447                 do_softirq();
3448         preempt_enable();
3449
3450         return err;
3451 }
3452 EXPORT_SYMBOL(netif_rx_ni);
3453
3454 static void net_tx_action(struct softirq_action *h)
3455 {
3456         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3457
3458         if (sd->completion_queue) {
3459                 struct sk_buff *clist;
3460
3461                 local_irq_disable();
3462                 clist = sd->completion_queue;
3463                 sd->completion_queue = NULL;
3464                 local_irq_enable();
3465
3466                 while (clist) {
3467                         struct sk_buff *skb = clist;
3468                         clist = clist->next;
3469
3470                         WARN_ON(atomic_read(&skb->users));
3471                         if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3472                                 trace_consume_skb(skb);
3473                         else
3474                                 trace_kfree_skb(skb, net_tx_action);
3475                         __kfree_skb(skb);
3476                 }
3477         }
3478
3479         if (sd->output_queue) {
3480                 struct Qdisc *head;
3481
3482                 local_irq_disable();
3483                 head = sd->output_queue;
3484                 sd->output_queue = NULL;
3485                 sd->output_queue_tailp = &sd->output_queue;
3486                 local_irq_enable();
3487
3488                 while (head) {
3489                         struct Qdisc *q = head;
3490                         spinlock_t *root_lock;
3491
3492                         head = head->next_sched;
3493
3494                         root_lock = qdisc_lock(q);
3495                         if (spin_trylock(root_lock)) {
3496                                 smp_mb__before_atomic();
3497                                 clear_bit(__QDISC_STATE_SCHED,
3498                                           &q->state);
3499                                 qdisc_run(q);
3500                                 spin_unlock(root_lock);
3501                         } else {
3502                                 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3503                                               &q->state)) {
3504                                         __netif_reschedule(q);
3505                                 } else {
3506                                         smp_mb__before_atomic();
3507                                         clear_bit(__QDISC_STATE_SCHED,
3508                                                   &q->state);
3509                                 }
3510                         }
3511                 }
3512         }
3513 }
3514
3515 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3516     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3517 /* This hook is defined here for ATM LANE */
3518 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3519                              unsigned char *addr) __read_mostly;
3520 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3521 #endif
3522
3523 #ifdef CONFIG_NET_CLS_ACT
3524 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3525                                          struct packet_type **pt_prev,
3526                                          int *ret, struct net_device *orig_dev)
3527 {
3528         struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3529         struct tcf_result cl_res;
3530
3531         /* If there's at least one ingress present somewhere (so
3532          * we get here via enabled static key), remaining devices
3533          * that are not configured with an ingress qdisc will bail
3534          * out here.
3535          */
3536         if (!cl)
3537                 return skb;
3538         if (*pt_prev) {
3539                 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3540                 *pt_prev = NULL;
3541         }
3542
3543         qdisc_bstats_update_cpu(cl->q, skb);
3544         skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3545
3546         switch (tc_classify(skb, cl, &cl_res)) {
3547         case TC_ACT_OK:
3548         case TC_ACT_RECLASSIFY:
3549                 skb->tc_index = TC_H_MIN(cl_res.classid);
3550                 break;
3551         case TC_ACT_SHOT:
3552                 qdisc_qstats_drop_cpu(cl->q);
3553         case TC_ACT_STOLEN:
3554         case TC_ACT_QUEUED:
3555                 kfree_skb(skb);
3556                 return NULL;
3557         default:
3558                 break;
3559         }
3560
3561         return skb;
3562 }
3563 #endif
3564
3565 /**
3566  *      netdev_rx_handler_register - register receive handler
3567  *      @dev: device to register a handler for
3568  *      @rx_handler: receive handler to register
3569  *      @rx_handler_data: data pointer that is used by rx handler
3570  *
3571  *      Register a receive handler for a device. This handler will then be
3572  *      called from __netif_receive_skb. A negative errno code is returned
3573  *      on a failure.
3574  *
3575  *      The caller must hold the rtnl_mutex.
3576  *
3577  *      For a general description of rx_handler, see enum rx_handler_result.
3578  */
3579 int netdev_rx_handler_register(struct net_device *dev,
3580                                rx_handler_func_t *rx_handler,
3581                                void *rx_handler_data)
3582 {
3583         ASSERT_RTNL();
3584
3585         if (dev->rx_handler)
3586                 return -EBUSY;
3587
3588         /* Note: rx_handler_data must be set before rx_handler */
3589         rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3590         rcu_assign_pointer(dev->rx_handler, rx_handler);
3591
3592         return 0;
3593 }
3594 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3595
3596 /**
3597  *      netdev_rx_handler_unregister - unregister receive handler
3598  *      @dev: device to unregister a handler from
3599  *
3600  *      Unregister a receive handler from a device.
3601  *
3602  *      The caller must hold the rtnl_mutex.
3603  */
3604 void netdev_rx_handler_unregister(struct net_device *dev)
3605 {
3606
3607         ASSERT_RTNL();
3608         RCU_INIT_POINTER(dev->rx_handler, NULL);
3609         /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
3610          * section has a guarantee to see a non NULL rx_handler_data
3611          * as well.
3612          */
3613         synchronize_net();
3614         RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3615 }
3616 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3617
3618 /*
3619  * Limit the use of PFMEMALLOC reserves to those protocols that implement
3620  * the special handling of PFMEMALLOC skbs.
3621  */
3622 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3623 {
3624         switch (skb->protocol) {
3625         case htons(ETH_P_ARP):
3626         case htons(ETH_P_IP):
3627         case htons(ETH_P_IPV6):
3628         case htons(ETH_P_8021Q):
3629         case htons(ETH_P_8021AD):
3630                 return true;
3631         default:
3632                 return false;
3633         }
3634 }
3635
3636 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3637 {
3638         struct packet_type *ptype, *pt_prev;
3639         rx_handler_func_t *rx_handler;
3640         struct net_device *orig_dev;
3641         bool deliver_exact = false;
3642         int ret = NET_RX_DROP;
3643         __be16 type;
3644
3645         net_timestamp_check(!netdev_tstamp_prequeue, skb);
3646
3647         trace_netif_receive_skb(skb);
3648
3649         orig_dev = skb->dev;
3650
3651         skb_reset_network_header(skb);
3652         if (!skb_transport_header_was_set(skb))
3653                 skb_reset_transport_header(skb);
3654         skb_reset_mac_len(skb);
3655
3656         pt_prev = NULL;
3657
3658         rcu_read_lock();
3659
3660 another_round:
3661         skb->skb_iif = skb->dev->ifindex;
3662
3663         __this_cpu_inc(softnet_data.processed);
3664
3665         if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3666             skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3667                 skb = skb_vlan_untag(skb);
3668                 if (unlikely(!skb))
3669                         goto unlock;
3670         }
3671
3672 #ifdef CONFIG_NET_CLS_ACT
3673         if (skb->tc_verd & TC_NCLS) {
3674                 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3675                 goto ncls;
3676         }
3677 #endif
3678
3679         if (pfmemalloc)
3680                 goto skip_taps;
3681
3682         list_for_each_entry_rcu(ptype, &ptype_all, list) {
3683                 if (pt_prev)
3684                         ret = deliver_skb(skb, pt_prev, orig_dev);
3685                 pt_prev = ptype;
3686         }
3687
3688         list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
3689                 if (pt_prev)
3690                         ret = deliver_skb(skb, pt_prev, orig_dev);
3691                 pt_prev = ptype;
3692         }
3693
3694 skip_taps:
3695 #ifdef CONFIG_NET_CLS_ACT
3696         if (static_key_false(&ingress_needed)) {
3697                 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3698                 if (!skb)
3699                         goto unlock;
3700         }
3701
3702         skb->tc_verd = 0;
3703 ncls:
3704 #endif
3705         if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3706                 goto drop;
3707
3708         if (skb_vlan_tag_present(skb)) {
3709                 if (pt_prev) {
3710                         ret = deliver_skb(skb, pt_prev, orig_dev);
3711                         pt_prev = NULL;
3712                 }
3713                 if (vlan_do_receive(&skb))
3714                         goto another_round;
3715                 else if (unlikely(!skb))
3716                         goto unlock;
3717         }
3718
3719         rx_handler = rcu_dereference(skb->dev->rx_handler);
3720         if (rx_handler) {
3721                 if (pt_prev) {
3722                         ret = deliver_skb(skb, pt_prev, orig_dev);
3723                         pt_prev = NULL;
3724                 }
3725                 switch (rx_handler(&skb)) {
3726                 case RX_HANDLER_CONSUMED:
3727                         ret = NET_RX_SUCCESS;
3728                         goto unlock;
3729                 case RX_HANDLER_ANOTHER:
3730                         goto another_round;
3731                 case RX_HANDLER_EXACT:
3732                         deliver_exact = true;
3733                 case RX_HANDLER_PASS:
3734                         break;
3735                 default:
3736                         BUG();
3737                 }
3738         }
3739
3740         if (unlikely(skb_vlan_tag_present(skb))) {
3741                 if (skb_vlan_tag_get_id(skb))
3742                         skb->pkt_type = PACKET_OTHERHOST;
3743                 /* Note: we might in the future use prio bits
3744                  * and set skb->priority like in vlan_do_receive()
3745                  * For the time being, just ignore Priority Code Point
3746                  */
3747                 skb->vlan_tci = 0;
3748         }
3749
3750         type = skb->protocol;
3751
3752         /* deliver only exact match when indicated */
3753         if (likely(!deliver_exact)) {
3754                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3755                                        &ptype_base[ntohs(type) &
3756                                                    PTYPE_HASH_MASK]);
3757         }
3758
3759         deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3760                                &orig_dev->ptype_specific);
3761
3762         if (unlikely(skb->dev != orig_dev)) {
3763                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3764                                        &skb->dev->ptype_specific);
3765         }
3766
3767         if (pt_prev) {
3768                 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3769                         goto drop;
3770                 else
3771                         ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3772         } else {
3773 drop:
3774                 atomic_long_inc(&skb->dev->rx_dropped);
3775                 kfree_skb(skb);
3776                 /* Jamal, now you will not able to escape explaining
3777                  * me how you were going to use this. :-)
3778                  */
3779                 ret = NET_RX_DROP;
3780         }
3781
3782 unlock:
3783         rcu_read_unlock();
3784         return ret;
3785 }
3786
3787 static int __netif_receive_skb(struct sk_buff *skb)
3788 {
3789         int ret;
3790
3791         if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3792                 unsigned long pflags = current->flags;
3793
3794                 /*
3795                  * PFMEMALLOC skbs are special, they should
3796                  * - be delivered to SOCK_MEMALLOC sockets only
3797                  * - stay away from userspace
3798                  * - have bounded memory usage
3799                  *
3800                  * Use PF_MEMALLOC as this saves us from propagating the allocation
3801                  * context down to all allocation sites.
3802                  */
3803                 current->flags |= PF_MEMALLOC;
3804                 ret = __netif_receive_skb_core(skb, true);
3805                 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3806         } else
3807                 ret = __netif_receive_skb_core(skb, false);
3808
3809         return ret;
3810 }
3811
3812 static int netif_receive_skb_internal(struct sk_buff *skb)
3813 {
3814         net_timestamp_check(netdev_tstamp_prequeue, skb);
3815
3816         if (skb_defer_rx_timestamp(skb))
3817                 return NET_RX_SUCCESS;
3818
3819 #ifdef CONFIG_RPS
3820         if (static_key_false(&rps_needed)) {
3821                 struct rps_dev_flow voidflow, *rflow = &voidflow;
3822                 int cpu, ret;
3823
3824                 rcu_read_lock();
3825
3826                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3827
3828                 if (cpu >= 0) {
3829                         ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3830                         rcu_read_unlock();
3831                         return ret;
3832                 }
3833                 rcu_read_unlock();
3834         }
3835 #endif
3836         return __netif_receive_skb(skb);
3837 }
3838
3839 /**
3840  *      netif_receive_skb - process receive buffer from network
3841  *      @skb: buffer to process
3842  *
3843  *      netif_receive_skb() is the main receive data processing function.
3844  *      It always succeeds. The buffer may be dropped during processing
3845  *      for congestion control or by the protocol layers.
3846  *
3847  *      This function may only be called from softirq context and interrupts
3848  *      should be enabled.
3849  *
3850  *      Return values (usually ignored):
3851  *      NET_RX_SUCCESS: no congestion
3852  *      NET_RX_DROP: packet was dropped
3853  */
3854 int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
3855 {
3856         trace_netif_receive_skb_entry(skb);
3857
3858         return netif_receive_skb_internal(skb);
3859 }
3860 EXPORT_SYMBOL(netif_receive_skb_sk);
3861
3862 /* Network device is going away, flush any packets still pending
3863  * Called with irqs disabled.
3864  */
3865 static void flush_backlog(void *arg)
3866 {
3867         struct net_device *dev = arg;
3868         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3869         struct sk_buff *skb, *tmp;
3870
3871         rps_lock(sd);
3872         skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3873                 if (skb->dev == dev) {
3874                         __skb_unlink(skb, &sd->input_pkt_queue);
3875                         kfree_skb(skb);
3876                         input_queue_head_incr(sd);
3877                 }
3878         }
3879         rps_unlock(sd);
3880
3881         skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3882                 if (skb->dev == dev) {
3883                         __skb_unlink(skb, &sd->process_queue);
3884                         kfree_skb(skb);
3885                         input_queue_head_incr(sd);
3886                 }
3887         }
3888 }
3889
3890 static int napi_gro_complete(struct sk_buff *skb)
3891 {
3892         struct packet_offload *ptype;
3893         __be16 type = skb->protocol;
3894         struct list_head *head = &offload_base;
3895         int err = -ENOENT;
3896
3897         BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3898
3899         if (NAPI_GRO_CB(skb)->count == 1) {
3900                 skb_shinfo(skb)->gso_size = 0;
3901                 goto out;
3902         }
3903
3904         rcu_read_lock();
3905         list_for_each_entry_rcu(ptype, head, list) {
3906                 if (ptype->type != type || !ptype->callbacks.gro_complete)
3907                         continue;
3908
3909                 err = ptype->callbacks.gro_complete(skb, 0);
3910                 break;
3911         }
3912         rcu_read_unlock();
3913
3914         if (err) {
3915                 WARN_ON(&ptype->list == head);
3916                 kfree_skb(skb);
3917                 return NET_RX_SUCCESS;
3918         }
3919
3920 out:
3921         return netif_receive_skb_internal(skb);
3922 }
3923
3924 /* napi->gro_list contains packets ordered by age.
3925  * youngest packets at the head of it.
3926  * Complete skbs in reverse order to reduce latencies.
3927  */
3928 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3929 {
3930         struct sk_buff *skb, *prev = NULL;
3931
3932         /* scan list and build reverse chain */
3933         for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3934                 skb->prev = prev;
3935                 prev = skb;
3936         }
3937
3938         for (skb = prev; skb; skb = prev) {
3939                 skb->next = NULL;
3940
3941                 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3942                         return;
3943
3944                 prev = skb->prev;
3945                 napi_gro_complete(skb);
3946                 napi->gro_count--;
3947         }
3948
3949         napi->gro_list = NULL;
3950 }
3951 EXPORT_SYMBOL(napi_gro_flush);
3952
3953 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3954 {
3955         struct sk_buff *p;
3956         unsigned int maclen = skb->dev->hard_header_len;
3957         u32 hash = skb_get_hash_raw(skb);
3958
3959         for (p = napi->gro_list; p; p = p->next) {
3960                 unsigned long diffs;
3961
3962                 NAPI_GRO_CB(p)->flush = 0;
3963
3964                 if (hash != skb_get_hash_raw(p)) {
3965                         NAPI_GRO_CB(p)->same_flow = 0;
3966                         continue;
3967                 }
3968
3969                 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3970                 diffs |= p->vlan_tci ^ skb->vlan_tci;
3971                 if (maclen == ETH_HLEN)
3972                         diffs |= compare_ether_header(skb_mac_header(p),
3973                                                       skb_mac_header(skb));
3974                 else if (!diffs)
3975                         diffs = memcmp(skb_mac_header(p),
3976                                        skb_mac_header(skb),
3977                                        maclen);
3978                 NAPI_GRO_CB(p)->same_flow = !diffs;
3979         }
3980 }
3981
3982 static void skb_gro_reset_offset(struct sk_buff *skb)
3983 {
3984         const struct skb_shared_info *pinfo = skb_shinfo(skb);
3985         const skb_frag_t *frag0 = &pinfo->frags[0];
3986
3987         NAPI_GRO_CB(skb)->data_offset = 0;
3988         NAPI_GRO_CB(skb)->frag0 = NULL;
3989         NAPI_GRO_CB(skb)->frag0_len = 0;
3990
3991         if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
3992             pinfo->nr_frags &&
3993             !PageHighMem(skb_frag_page(frag0))) {
3994                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3995                 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3996         }
3997 }
3998
3999 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4000 {
4001         struct skb_shared_info *pinfo = skb_shinfo(skb);
4002
4003         BUG_ON(skb->end - skb->tail < grow);
4004
4005         memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4006
4007         skb->data_len -= grow;
4008         skb->tail += grow;
4009
4010         pinfo->frags[0].page_offset += grow;
4011         skb_frag_size_sub(&pinfo->frags[0], grow);
4012
4013         if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4014                 skb_frag_unref(skb, 0);
4015                 memmove(pinfo->frags, pinfo->frags + 1,
4016                         --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4017         }
4018 }
4019
4020 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4021 {
4022         struct sk_buff **pp = NULL;
4023         struct packet_offload *ptype;
4024         __be16 type = skb->protocol;
4025         struct list_head *head = &offload_base;
4026         int same_flow;
4027         enum gro_result ret;
4028         int grow;
4029
4030         if (!(skb->dev->features & NETIF_F_GRO))
4031                 goto normal;
4032
4033         if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
4034                 goto normal;
4035
4036         gro_list_prepare(napi, skb);
4037
4038         rcu_read_lock();
4039         list_for_each_entry_rcu(ptype, head, list) {
4040                 if (ptype->type != type || !ptype->callbacks.gro_receive)
4041                         continue;
4042
4043                 skb_set_network_header(skb, skb_gro_offset(skb));
4044                 skb_reset_mac_len(skb);
4045                 NAPI_GRO_CB(skb)->same_flow = 0;
4046                 NAPI_GRO_CB(skb)->flush = 0;
4047                 NAPI_GRO_CB(skb)->free = 0;
4048                 NAPI_GRO_CB(skb)->udp_mark = 0;
4049                 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
4050
4051                 /* Setup for GRO checksum validation */
4052                 switch (skb->ip_summed) {
4053                 case CHECKSUM_COMPLETE:
4054                         NAPI_GRO_CB(skb)->csum = skb->csum;
4055                         NAPI_GRO_CB(skb)->csum_valid = 1;
4056                         NAPI_GRO_CB(skb)->csum_cnt = 0;
4057                         break;
4058                 case CHECKSUM_UNNECESSARY:
4059                         NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4060                         NAPI_GRO_CB(skb)->csum_valid = 0;
4061                         break;
4062                 default:
4063                         NAPI_GRO_CB(skb)->csum_cnt = 0;
4064                         NAPI_GRO_CB(skb)->csum_valid = 0;
4065                 }
4066
4067                 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4068                 break;
4069         }
4070         rcu_read_unlock();
4071
4072         if (&ptype->list == head)
4073                 goto normal;
4074
4075         same_flow = NAPI_GRO_CB(skb)->same_flow;
4076         ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4077
4078         if (pp) {
4079                 struct sk_buff *nskb = *pp;
4080
4081                 *pp = nskb->next;
4082                 nskb->next = NULL;
4083                 napi_gro_complete(nskb);
4084                 napi->gro_count--;
4085         }
4086
4087         if (same_flow)
4088                 goto ok;
4089
4090         if (NAPI_GRO_CB(skb)->flush)
4091                 goto normal;
4092
4093         if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4094                 struct sk_buff *nskb = napi->gro_list;
4095
4096                 /* locate the end of the list to select the 'oldest' flow */
4097                 while (nskb->next) {
4098                         pp = &nskb->next;
4099                         nskb = *pp;
4100                 }
4101                 *pp = NULL;
4102                 nskb->next = NULL;
4103                 napi_gro_complete(nskb);
4104         } else {
4105                 napi->gro_count++;
4106         }
4107         NAPI_GRO_CB(skb)->count = 1;
4108         NAPI_GRO_CB(skb)->age = jiffies;
4109         NAPI_GRO_CB(skb)->last = skb;
4110         skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4111         skb->next = napi->gro_list;
4112         napi->gro_list = skb;
4113         ret = GRO_HELD;
4114
4115 pull:
4116         grow = skb_gro_offset(skb) - skb_headlen(skb);
4117         if (grow > 0)
4118                 gro_pull_from_frag0(skb, grow);
4119 ok:
4120         return ret;
4121
4122 normal:
4123         ret = GRO_NORMAL;
4124         goto pull;
4125 }
4126
4127 struct packet_offload *gro_find_receive_by_type(__be16 type)
4128 {
4129         struct list_head *offload_head = &offload_base;
4130         struct packet_offload *ptype;
4131
4132         list_for_each_entry_rcu(ptype, offload_head, list) {
4133                 if (ptype->type != type || !ptype->callbacks.gro_receive)
4134                         continue;
4135                 return ptype;
4136         }
4137         return NULL;
4138 }
4139 EXPORT_SYMBOL(gro_find_receive_by_type);
4140
4141 struct packet_offload *gro_find_complete_by_type(__be16 type)
4142 {
4143         struct list_head *offload_head = &offload_base;
4144         struct packet_offload *ptype;
4145
4146         list_for_each_entry_rcu(ptype, offload_head, list) {
4147                 if (ptype->type != type || !ptype->callbacks.gro_complete)
4148                         continue;
4149                 return ptype;
4150         }
4151         return NULL;
4152 }
4153 EXPORT_SYMBOL(gro_find_complete_by_type);
4154
4155 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4156 {
4157         switch (ret) {
4158         case GRO_NORMAL:
4159                 if (netif_receive_skb_internal(skb))
4160                         ret = GRO_DROP;
4161                 break;
4162
4163         case GRO_DROP:
4164                 kfree_skb(skb);
4165                 break;
4166
4167         case GRO_MERGED_FREE:
4168                 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4169                         kmem_cache_free(skbuff_head_cache, skb);
4170                 else
4171                         __kfree_skb(skb);
4172                 break;
4173
4174         case GRO_HELD:
4175         case GRO_MERGED:
4176                 break;
4177         }
4178
4179         return ret;
4180 }
4181
4182 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4183 {
4184         trace_napi_gro_receive_entry(skb);
4185
4186         skb_gro_reset_offset(skb);
4187
4188         return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4189 }
4190 EXPORT_SYMBOL(napi_gro_receive);
4191
4192 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4193 {
4194         if (unlikely(skb->pfmemalloc)) {
4195                 consume_skb(skb);
4196                 return;
4197         }
4198         __skb_pull(skb, skb_headlen(skb));
4199         /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4200         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4201         skb->vlan_tci = 0;
4202         skb->dev = napi->dev;
4203         skb->skb_iif = 0;
4204         skb->encapsulation = 0;
4205         skb_shinfo(skb)->gso_type = 0;
4206         skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4207
4208         napi->skb = skb;
4209 }
4210
4211 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4212 {
4213         struct sk_buff *skb = napi->skb;
4214
4215         if (!skb) {
4216                 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
4217                 napi->skb = skb;
4218         }
4219         return skb;
4220 }
4221 EXPORT_SYMBOL(napi_get_frags);
4222
4223 static gro_result_t napi_frags_finish(struct napi_struct *napi,
4224                                       struct sk_buff *skb,
4225                                       gro_result_t ret)
4226 {
4227         switch (ret) {
4228         case GRO_NORMAL:
4229         case GRO_HELD:
4230                 __skb_push(skb, ETH_HLEN);
4231                 skb->protocol = eth_type_trans(skb, skb->dev);
4232                 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4233                         ret = GRO_DROP;
4234                 break;
4235
4236         case GRO_DROP:
4237         case GRO_MERGED_FREE:
4238                 napi_reuse_skb(napi, skb);
4239                 break;
4240
4241         case GRO_MERGED:
4242                 break;
4243         }
4244
4245         return ret;
4246 }
4247
4248 /* Upper GRO stack assumes network header starts at gro_offset=0
4249  * Drivers could call both napi_gro_frags() and napi_gro_receive()
4250  * We copy ethernet header into skb->data to have a common layout.
4251  */
4252 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4253 {
4254         struct sk_buff *skb = napi->skb;
4255         const struct ethhdr *eth;
4256         unsigned int hlen = sizeof(*eth);
4257
4258         napi->skb = NULL;
4259
4260         skb_reset_mac_header(skb);
4261         skb_gro_reset_offset(skb);
4262
4263         eth = skb_gro_header_fast(skb, 0);
4264         if (unlikely(skb_gro_header_hard(skb, hlen))) {
4265                 eth = skb_gro_header_slow(skb, hlen, 0);
4266                 if (unlikely(!eth)) {
4267                         napi_reuse_skb(napi, skb);
4268                         return NULL;
4269                 }
4270         } else {
4271                 gro_pull_from_frag0(skb, hlen);
4272                 NAPI_GRO_CB(skb)->frag0 += hlen;
4273                 NAPI_GRO_CB(skb)->frag0_len -= hlen;
4274         }
4275         __skb_pull(skb, hlen);
4276
4277         /*
4278          * This works because the only protocols we care about don't require
4279          * special handling.
4280          * We'll fix it up properly in napi_frags_finish()
4281          */
4282         skb->protocol = eth->h_proto;
4283
4284         return skb;
4285 }
4286
4287 gro_result_t napi_gro_frags(struct napi_struct *napi)
4288 {
4289         struct sk_buff *skb = napi_frags_skb(napi);
4290
4291         if (!skb)
4292                 return GRO_DROP;
4293
4294         trace_napi_gro_frags_entry(skb);
4295
4296         return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4297 }
4298 EXPORT_SYMBOL(napi_gro_frags);
4299
4300 /* Compute the checksum from gro_offset and return the folded value
4301  * after adding in any pseudo checksum.
4302  */
4303 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4304 {
4305         __wsum wsum;
4306         __sum16 sum;
4307
4308         wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4309
4310         /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4311         sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4312         if (likely(!sum)) {
4313                 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4314                     !skb->csum_complete_sw)
4315                         netdev_rx_csum_fault(skb->dev);
4316         }
4317
4318         NAPI_GRO_CB(skb)->csum = wsum;
4319         NAPI_GRO_CB(skb)->csum_valid = 1;
4320
4321         return sum;
4322 }
4323 EXPORT_SYMBOL(__skb_gro_checksum_complete);
4324
4325 /*
4326  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
4327  * Note: called with local irq disabled, but exits with local irq enabled.
4328  */
4329 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4330 {
4331 #ifdef CONFIG_RPS
4332         struct softnet_data *remsd = sd->rps_ipi_list;
4333
4334         if (remsd) {
4335                 sd->rps_ipi_list = NULL;
4336
4337                 local_irq_enable();
4338
4339                 /* Send pending IPI's to kick RPS processing on remote cpus. */
4340                 while (remsd) {
4341                         struct softnet_data *next = remsd->rps_ipi_next;
4342
4343                         if (cpu_online(remsd->cpu))
4344                                 smp_call_function_single_async(remsd->cpu,
4345                                                            &remsd->csd);
4346                         remsd = next;
4347                 }
4348         } else
4349 #endif
4350                 local_irq_enable();
4351 }
4352
4353 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4354 {
4355 #ifdef CONFIG_RPS
4356         return sd->rps_ipi_list != NULL;
4357 #else
4358         return false;
4359 #endif
4360 }
4361
4362 static int process_backlog(struct napi_struct *napi, int quota)
4363 {
4364         int work = 0;
4365         struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4366
4367         /* Check if we have pending ipi, its better to send them now,
4368          * not waiting net_rx_action() end.
4369          */
4370         if (sd_has_rps_ipi_waiting(sd)) {
4371                 local_irq_disable();
4372                 net_rps_action_and_irq_enable(sd);
4373         }
4374
4375         napi->weight = weight_p;
4376         local_irq_disable();
4377         while (1) {
4378                 struct sk_buff *skb;
4379
4380                 while ((skb = __skb_dequeue(&sd->process_queue))) {
4381                         local_irq_enable();
4382                         __netif_receive_skb(skb);
4383                         local_irq_disable();
4384                         input_queue_head_incr(sd);
4385                         if (++work >= quota) {
4386                                 local_irq_enable();
4387                                 return work;
4388                         }
4389                 }
4390
4391                 rps_lock(sd);
4392                 if (skb_queue_empty(&sd->input_pkt_queue)) {
4393                         /*
4394                          * Inline a custom version of __napi_complete().
4395                          * only current cpu owns and manipulates this napi,
4396                          * and NAPI_STATE_SCHED is the only possible flag set
4397                          * on backlog.
4398                          * We can use a plain write instead of clear_bit(),
4399                          * and we dont need an smp_mb() memory barrier.
4400                          */
4401                         napi->state = 0;
4402                         rps_unlock(sd);
4403
4404                         break;
4405                 }
4406
4407                 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4408                                            &sd->process_queue);
4409                 rps_unlock(sd);
4410         }
4411         local_irq_enable();
4412
4413         return work;
4414 }
4415
4416 /**
4417  * __napi_schedule - schedule for receive
4418  * @n: entry to schedule
4419  *
4420  * The entry's receive function will be scheduled to run.
4421  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
4422  */
4423 void __napi_schedule(struct napi_struct *n)
4424 {
4425         unsigned long flags;
4426
4427         local_irq_save(flags);
4428         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4429         local_irq_restore(flags);
4430 }
4431 EXPORT_SYMBOL(__napi_schedule);
4432
4433 /**
4434  * __napi_schedule_irqoff - schedule for receive
4435  * @n: entry to schedule
4436  *
4437  * Variant of __napi_schedule() assuming hard irqs are masked
4438  */
4439 void __napi_schedule_irqoff(struct napi_struct *n)
4440 {
4441         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4442 }
4443 EXPORT_SYMBOL(__napi_schedule_irqoff);
4444
4445 void __napi_complete(struct napi_struct *n)
4446 {
4447         BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4448
4449         list_del_init(&n->poll_list);
4450         smp_mb__before_atomic();
4451         clear_bit(NAPI_STATE_SCHED, &n->state);
4452 }
4453 EXPORT_SYMBOL(__napi_complete);
4454
4455 void napi_complete_done(struct napi_struct *n, int work_done)
4456 {
4457         unsigned long flags;
4458
4459         /*
4460          * don't let napi dequeue from the cpu poll list
4461          * just in case its running on a different cpu
4462          */
4463         if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4464                 return;
4465
4466         if (n->gro_list) {
4467                 unsigned long timeout = 0;
4468
4469                 if (work_done)
4470                         timeout = n->dev->gro_flush_timeout;
4471
4472                 if (timeout)
4473                         hrtimer_start(&n->timer, ns_to_ktime(timeout),
4474                                       HRTIMER_MODE_REL_PINNED);
4475                 else
4476                         napi_gro_flush(n, false);
4477         }
4478         if (likely(list_empty(&n->poll_list))) {
4479                 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4480         } else {
4481                 /* If n->poll_list is not empty, we need to mask irqs */
4482                 local_irq_save(flags);
4483                 __napi_complete(n);
4484                 local_irq_restore(flags);
4485         }
4486 }
4487 EXPORT_SYMBOL(napi_complete_done);
4488
4489 /* must be called under rcu_read_lock(), as we dont take a reference */
4490 struct napi_struct *napi_by_id(unsigned int napi_id)
4491 {
4492         unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4493         struct napi_struct *napi;
4494
4495         hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4496                 if (napi->napi_id == napi_id)
4497                         return napi;
4498
4499         return NULL;
4500 }
4501 EXPORT_SYMBOL_GPL(napi_by_id);
4502
4503 void napi_hash_add(struct napi_struct *napi)
4504 {
4505         if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4506
4507                 spin_lock(&napi_hash_lock);
4508
4509                 /* 0 is not a valid id, we also skip an id that is taken
4510                  * we expect both events to be extremely rare
4511                  */
4512                 napi->napi_id = 0;
4513                 while (!napi->napi_id) {
4514                         napi->napi_id = ++napi_gen_id;
4515                         if (napi_by_id(napi->napi_id))
4516                                 napi->napi_id = 0;
4517                 }
4518
4519                 hlist_add_head_rcu(&napi->napi_hash_node,
4520                         &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4521
4522                 spin_unlock(&napi_hash_lock);
4523         }
4524 }
4525 EXPORT_SYMBOL_GPL(napi_hash_add);
4526
4527 /* Warning : caller is responsible to make sure rcu grace period
4528  * is respected before freeing memory containing @napi
4529  */
4530 void napi_hash_del(struct napi_struct *napi)
4531 {
4532         spin_lock(&napi_hash_lock);
4533
4534         if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4535                 hlist_del_rcu(&napi->napi_hash_node);
4536
4537         spin_unlock(&napi_hash_lock);
4538 }
4539 EXPORT_SYMBOL_GPL(napi_hash_del);
4540
4541 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4542 {
4543         struct napi_struct *napi;
4544
4545         napi = container_of(timer, struct napi_struct, timer);
4546         if (napi->gro_list)
4547                 napi_schedule(napi);
4548
4549         return HRTIMER_NORESTART;
4550 }
4551
4552 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4553                     int (*poll)(struct napi_struct *, int), int weight)
4554 {
4555         INIT_LIST_HEAD(&napi->poll_list);
4556         hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4557         napi->timer.function = napi_watchdog;
4558         napi->gro_count = 0;
4559         napi->gro_list = NULL;
4560         napi->skb = NULL;
4561         napi->poll = poll;
4562         if (weight > NAPI_POLL_WEIGHT)
4563                 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4564                             weight, dev->name);
4565         napi->weight = weight;
4566         list_add(&napi->dev_list, &dev->napi_list);
4567         napi->dev = dev;
4568 #ifdef CONFIG_NETPOLL
4569         spin_lock_init(&napi->poll_lock);
4570         napi->poll_owner = -1;
4571 #endif
4572         set_bit(NAPI_STATE_SCHED, &napi->state);
4573 }
4574 EXPORT_SYMBOL(netif_napi_add);
4575
4576 void napi_disable(struct napi_struct *n)
4577 {
4578         might_sleep();
4579         set_bit(NAPI_STATE_DISABLE, &n->state);
4580
4581         while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4582                 msleep(1);
4583
4584         hrtimer_cancel(&n->timer);
4585
4586         clear_bit(NAPI_STATE_DISABLE, &n->state);
4587 }
4588 EXPORT_SYMBOL(napi_disable);
4589
4590 void netif_napi_del(struct napi_struct *napi)
4591 {
4592         list_del_init(&napi->dev_list);
4593         napi_free_frags(napi);
4594
4595         kfree_skb_list(napi->gro_list);
4596         napi->gro_list = NULL;
4597         napi->gro_count = 0;
4598 }
4599 EXPORT_SYMBOL(netif_napi_del);
4600
4601 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4602 {
4603         void *have;
4604         int work, weight;
4605
4606         list_del_init(&n->poll_list);
4607
4608         have = netpoll_poll_lock(n);
4609
4610         weight = n->weight;
4611
4612         /* This NAPI_STATE_SCHED test is for avoiding a race
4613          * with netpoll's poll_napi().  Only the entity which
4614          * obtains the lock and sees NAPI_STATE_SCHED set will
4615          * actually make the ->poll() call.  Therefore we avoid
4616          * accidentally calling ->poll() when NAPI is not scheduled.
4617          */
4618         work = 0;
4619         if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4620                 work = n->poll(n, weight);
4621                 trace_napi_poll(n);
4622         }
4623
4624         WARN_ON_ONCE(work > weight);
4625
4626         if (likely(work < weight))
4627                 goto out_unlock;
4628
4629         /* Drivers must not modify the NAPI state if they
4630          * consume the entire weight.  In such cases this code
4631          * still "owns" the NAPI instance and therefore can
4632          * move the instance around on the list at-will.
4633          */
4634         if (unlikely(napi_disable_pending(n))) {
4635                 napi_complete(n);
4636                 goto out_unlock;
4637         }
4638
4639         if (n->gro_list) {
4640                 /* flush too old packets
4641                  * If HZ < 1000, flush all packets.
4642                  */
4643                 napi_gro_flush(n, HZ >= 1000);
4644         }
4645
4646         /* Some drivers may have called napi_schedule
4647          * prior to exhausting their budget.
4648          */
4649         if (unlikely(!list_empty(&n->poll_list))) {
4650                 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4651                              n->dev ? n->dev->name : "backlog");
4652                 goto out_unlock;
4653         }
4654
4655         list_add_tail(&n->poll_list, repoll);
4656
4657 out_unlock:
4658         netpoll_poll_unlock(have);
4659
4660         return work;
4661 }
4662
4663 static void net_rx_action(struct softirq_action *h)
4664 {
4665         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4666         unsigned long time_limit = jiffies + 2;
4667         int budget = netdev_budget;
4668         LIST_HEAD(list);
4669         LIST_HEAD(repoll);
4670
4671         local_irq_disable();
4672         list_splice_init(&sd->poll_list, &list);
4673         local_irq_enable();
4674
4675         for (;;) {
4676                 struct napi_struct *n;
4677
4678                 if (list_empty(&list)) {
4679                         if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4680                                 return;
4681                         break;
4682                 }
4683
4684                 n = list_first_entry(&list, struct napi_struct, poll_list);
4685                 budget -= napi_poll(n, &repoll);
4686
4687                 /* If softirq window is exhausted then punt.
4688                  * Allow this to run for 2 jiffies since which will allow
4689                  * an average latency of 1.5/HZ.
4690                  */
4691                 if (unlikely(budget <= 0 ||
4692                              time_after_eq(jiffies, time_limit))) {
4693                         sd->time_squeeze++;
4694                         break;
4695                 }
4696         }
4697
4698         local_irq_disable();
4699
4700         list_splice_tail_init(&sd->poll_list, &list);
4701         list_splice_tail(&repoll, &list);
4702         list_splice(&list, &sd->poll_list);
4703         if (!list_empty(&sd->poll_list))
4704                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4705
4706         net_rps_action_and_irq_enable(sd);
4707 }
4708
4709 struct netdev_adjacent {
4710         struct net_device *dev;
4711
4712         /* upper master flag, there can only be one master device per list */
4713         bool master;
4714
4715         /* counter for the number of times this device was added to us */
4716         u16 ref_nr;
4717
4718         /* private field for the users */
4719         void *private;
4720
4721         struct list_head list;
4722         struct rcu_head rcu;
4723 };
4724
4725 static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4726                                                  struct net_device *adj_dev,
4727                                                  struct list_head *adj_list)
4728 {
4729         struct netdev_adjacent *adj;
4730
4731         list_for_each_entry(adj, adj_list, list) {
4732                 if (adj->dev == adj_dev)
4733                         return adj;
4734         }
4735         return NULL;
4736 }
4737
4738 /**
4739  * netdev_has_upper_dev - Check if device is linked to an upper device
4740  * @dev: device
4741  * @upper_dev: upper device to check
4742  *
4743  * Find out if a device is linked to specified upper device and return true
4744  * in case it is. Note that this checks only immediate upper device,
4745  * not through a complete stack of devices. The caller must hold the RTNL lock.
4746  */
4747 bool netdev_has_upper_dev(struct net_device *dev,
4748                           struct net_device *upper_dev)
4749 {
4750         ASSERT_RTNL();
4751
4752         return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
4753 }
4754 EXPORT_SYMBOL(netdev_has_upper_dev);
4755
4756 /**
4757  * netdev_has_any_upper_dev - Check if device is linked to some device
4758  * @dev: device
4759  *
4760  * Find out if a device is linked to an upper device and return true in case
4761  * it is. The caller must hold the RTNL lock.
4762  */
4763 static bool netdev_has_any_upper_dev(struct net_device *dev)
4764 {
4765         ASSERT_RTNL();
4766
4767         return !list_empty(&dev->all_adj_list.upper);
4768 }
4769
4770 /**
4771  * netdev_master_upper_dev_get - Get master upper device
4772  * @dev: device
4773  *
4774  * Find a master upper device and return pointer to it or NULL in case
4775  * it's not there. The caller must hold the RTNL lock.
4776  */
4777 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4778 {
4779         struct netdev_adjacent *upper;
4780
4781         ASSERT_RTNL();
4782
4783         if (list_empty(&dev->adj_list.upper))
4784                 return NULL;
4785
4786         upper = list_first_entry(&dev->adj_list.upper,
4787                                  struct netdev_adjacent, list);
4788         if (likely(upper->master))
4789                 return upper->dev;
4790         return NULL;
4791 }
4792 EXPORT_SYMBOL(netdev_master_upper_dev_get);
4793
4794 void *netdev_adjacent_get_private(struct list_head *adj_list)
4795 {
4796         struct netdev_adjacent *adj;
4797
4798         adj = list_entry(adj_list, struct netdev_adjacent, list);
4799
4800         return adj->private;
4801 }
4802 EXPORT_SYMBOL(netdev_adjacent_get_private);
4803
4804 /**
4805  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4806  * @dev: device
4807  * @iter: list_head ** of the current position
4808  *
4809  * Gets the next device from the dev's upper list, starting from iter
4810  * position. The caller must hold RCU read lock.
4811  */
4812 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4813                                                  struct list_head **iter)
4814 {
4815         struct netdev_adjacent *upper;
4816
4817         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4818
4819         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4820
4821         if (&upper->list == &dev->adj_list.upper)
4822                 return NULL;
4823
4824         *iter = &upper->list;
4825
4826         return upper->dev;
4827 }
4828 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
4829
4830 /**
4831  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4832  * @dev: device
4833  * @iter: list_head ** of the current position
4834  *
4835  * Gets the next device from the dev's upper list, starting from iter
4836  * position. The caller must hold RCU read lock.
4837  */
4838 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4839                                                      struct list_head **iter)
4840 {
4841         struct netdev_adjacent *upper;
4842
4843         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4844
4845         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4846
4847         if (&upper->list == &dev->all_adj_list.upper)
4848                 return NULL;
4849
4850         *iter = &upper->list;
4851
4852         return upper->dev;
4853 }
4854 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
4855
4856 /**
4857  * netdev_lower_get_next_private - Get the next ->private from the
4858  *                                 lower neighbour list
4859  * @dev: device
4860  * @iter: list_head ** of the current position
4861  *
4862  * Gets the next netdev_adjacent->private from the dev's lower neighbour
4863  * list, starting from iter position. The caller must hold either hold the
4864  * RTNL lock or its own locking that guarantees that the neighbour lower
4865  * list will remain unchainged.
4866  */
4867 void *netdev_lower_get_next_private(struct net_device *dev,
4868                                     struct list_head **iter)
4869 {
4870         struct netdev_adjacent *lower;
4871
4872         lower = list_entry(*iter, struct netdev_adjacent, list);
4873
4874         if (&lower->list == &dev->adj_list.lower)
4875                 return NULL;
4876
4877         *iter = lower->list.next;
4878
4879         return lower->private;
4880 }
4881 EXPORT_SYMBOL(netdev_lower_get_next_private);
4882
4883 /**
4884  * netdev_lower_get_next_private_rcu - Get the next ->private from the
4885  *                                     lower neighbour list, RCU
4886  *                                     variant
4887  * @dev: device
4888  * @iter: list_head ** of the current position
4889  *
4890  * Gets the next netdev_adjacent->private from the dev's lower neighbour
4891  * list, starting from iter position. The caller must hold RCU read lock.
4892  */
4893 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4894                                         struct list_head **iter)
4895 {
4896         struct netdev_adjacent *lower;
4897
4898         WARN_ON_ONCE(!rcu_read_lock_held());
4899
4900         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4901
4902         if (&lower->list == &dev->adj_list.lower)
4903                 return NULL;
4904
4905         *iter = &lower->list;
4906
4907         return lower->private;
4908 }
4909 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4910
4911 /**
4912  * netdev_lower_get_next - Get the next device from the lower neighbour
4913  *                         list
4914  * @dev: device
4915  * @iter: list_head ** of the current position
4916  *
4917  * Gets the next netdev_adjacent from the dev's lower neighbour
4918  * list, starting from iter position. The caller must hold RTNL lock or
4919  * its own locking that guarantees that the neighbour lower
4920  * list will remain unchainged.
4921  */
4922 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
4923 {
4924         struct netdev_adjacent *lower;
4925
4926         lower = list_entry((*iter)->next, struct netdev_adjacent, list);
4927
4928         if (&lower->list == &dev->adj_list.lower)
4929                 return NULL;
4930
4931         *iter = &lower->list;
4932
4933         return lower->dev;
4934 }
4935 EXPORT_SYMBOL(netdev_lower_get_next);
4936
4937 /**
4938  * netdev_lower_get_first_private_rcu - Get the first ->private from the
4939  *                                     lower neighbour list, RCU
4940  *                                     variant
4941  * @dev: device
4942  *
4943  * Gets the first netdev_adjacent->private from the dev's lower neighbour
4944  * list. The caller must hold RCU read lock.
4945  */
4946 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4947 {
4948         struct netdev_adjacent *lower;
4949
4950         lower = list_first_or_null_rcu(&dev->adj_list.lower,
4951                         struct netdev_adjacent, list);
4952         if (lower)
4953                 return lower->private;
4954         return NULL;
4955 }
4956 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4957
4958 /**
4959  * netdev_master_upper_dev_get_rcu - Get master upper device
4960  * @dev: device
4961  *
4962  * Find a master upper device and return pointer to it or NULL in case
4963  * it's not there. The caller must hold the RCU read lock.
4964  */
4965 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4966 {
4967         struct netdev_adjacent *upper;
4968
4969         upper = list_first_or_null_rcu(&dev->adj_list.upper,
4970                                        struct netdev_adjacent, list);
4971         if (upper && likely(upper->master))
4972                 return upper->dev;
4973         return NULL;
4974 }
4975 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4976
4977 static int netdev_adjacent_sysfs_add(struct net_device *dev,
4978                               struct net_device *adj_dev,
4979                               struct list_head *dev_list)
4980 {
4981         char linkname[IFNAMSIZ+7];
4982         sprintf(linkname, dev_list == &dev->adj_list.upper ?
4983                 "upper_%s" : "lower_%s", adj_dev->name);
4984         return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4985                                  linkname);
4986 }
4987 static void netdev_adjacent_sysfs_del(struct net_device *dev,
4988                                char *name,
4989                                struct list_head *dev_list)
4990 {
4991         char linkname[IFNAMSIZ+7];
4992         sprintf(linkname, dev_list == &dev->adj_list.upper ?
4993                 "upper_%s" : "lower_%s", name);
4994         sysfs_remove_link(&(dev->dev.kobj), linkname);
4995 }
4996
4997 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
4998                                                  struct net_device *adj_dev,
4999                                                  struct list_head *dev_list)
5000 {
5001         return (dev_list == &dev->adj_list.upper ||
5002                 dev_list == &dev->adj_list.lower) &&
5003                 net_eq(dev_net(dev), dev_net(adj_dev));
5004 }
5005
5006 static int __netdev_adjacent_dev_insert(struct net_device *dev,
5007                                         struct net_device *adj_dev,
5008                                         struct list_head *dev_list,
5009                                         void *private, bool master)
5010 {
5011         struct netdev_adjacent *adj;
5012         int ret;
5013
5014         adj = __netdev_find_adj(dev, adj_dev, dev_list);
5015
5016         if (adj) {
5017                 adj->ref_nr++;
5018                 return 0;
5019         }
5020
5021         adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5022         if (!adj)
5023                 return -ENOMEM;
5024
5025         adj->dev = adj_dev;
5026         adj->master = master;
5027         adj->ref_nr = 1;
5028         adj->private = private;
5029         dev_hold(adj_dev);
5030
5031         pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5032                  adj_dev->name, dev->name, adj_dev->name);
5033
5034         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
5035                 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5036                 if (ret)
5037                         goto free_adj;
5038         }
5039
5040         /* Ensure that master link is always the first item in list. */
5041         if (master) {
5042                 ret = sysfs_create_link(&(dev->dev.kobj),
5043                                         &(adj_dev->dev.kobj), "master");
5044                 if (ret)
5045                         goto remove_symlinks;
5046
5047                 list_add_rcu(&adj->list, dev_list);
5048         } else {
5049                 list_add_tail_rcu(&adj->list, dev_list);
5050         }
5051
5052         return 0;
5053
5054 remove_symlinks:
5055         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5056                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5057 free_adj:
5058         kfree(adj);
5059         dev_put(adj_dev);
5060
5061         return ret;
5062 }
5063
5064 static void __netdev_adjacent_dev_remove(struct net_device *dev,
5065                                          struct net_device *adj_dev,
5066                                          struct list_head *dev_list)
5067 {
5068         struct netdev_adjacent *adj;
5069
5070         adj = __netdev_find_adj(dev, adj_dev, dev_list);
5071
5072         if (!adj) {
5073                 pr_err("tried to remove device %s from %s\n",
5074                        dev->name, adj_dev->name);
5075                 BUG();
5076         }
5077
5078         if (adj->ref_nr > 1) {
5079                 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5080                          adj->ref_nr-1);
5081                 adj->ref_nr--;
5082                 return;
5083         }
5084
5085         if (adj->master)
5086                 sysfs_remove_link(&(dev->dev.kobj), "master");
5087
5088         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5089                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5090
5091         list_del_rcu(&adj->list);
5092         pr_debug("dev_put for %s, because link removed from %s to %s\n",
5093                  adj_dev->name, dev->name, adj_dev->name);
5094         dev_put(adj_dev);
5095         kfree_rcu(adj, rcu);
5096 }
5097
5098 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5099                                             struct net_device *upper_dev,
5100                                             struct list_head *up_list,
5101                                             struct list_head *down_list,
5102                                             void *private, bool master)
5103 {
5104         int ret;
5105
5106         ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5107                                            master);
5108         if (ret)
5109                 return ret;
5110
5111         ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5112                                            false);
5113         if (ret) {
5114                 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5115                 return ret;
5116         }
5117
5118         return 0;
5119 }
5120
5121 static int __netdev_adjacent_dev_link(struct net_device *dev,
5122                                       struct net_device *upper_dev)
5123 {
5124         return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5125                                                 &dev->all_adj_list.upper,
5126                                                 &upper_dev->all_adj_list.lower,
5127                                                 NULL, false);
5128 }
5129
5130 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5131                                                struct net_device *upper_dev,
5132                                                struct list_head *up_list,
5133                                                struct list_head *down_list)
5134 {
5135         __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5136         __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5137 }
5138
5139 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5140                                          struct net_device *upper_dev)
5141 {
5142         __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5143                                            &dev->all_adj_list.upper,
5144                                            &upper_dev->all_adj_list.lower);
5145 }
5146
5147 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5148                                                 struct net_device *upper_dev,
5149                                                 void *private, bool master)
5150 {
5151         int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5152
5153         if (ret)
5154                 return ret;
5155
5156         ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5157                                                &dev->adj_list.upper,
5158                                                &upper_dev->adj_list.lower,
5159                                                private, master);
5160         if (ret) {
5161                 __netdev_adjacent_dev_unlink(dev, upper_dev);
5162                 return ret;
5163         }
5164
5165         return 0;
5166 }
5167
5168 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5169                                                    struct net_device *upper_dev)
5170 {
5171         __netdev_adjacent_dev_unlink(dev, upper_dev);
5172         __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5173                                            &dev->adj_list.upper,
5174                                            &upper_dev->adj_list.lower);
5175 }
5176
5177 static int __netdev_upper_dev_link(struct net_device *dev,
5178                                    struct net_device *upper_dev, bool master,
5179                                    void *private)
5180 {
5181         struct netdev_adjacent *i, *j, *to_i, *to_j;
5182         int ret = 0;
5183
5184         ASSERT_RTNL();
5185
5186         if (dev == upper_dev)
5187                 return -EBUSY;
5188
5189         /* To prevent loops, check if dev is not upper device to upper_dev. */
5190         if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
5191                 return -EBUSY;
5192
5193         if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
5194                 return -EEXIST;
5195
5196         if (master && netdev_master_upper_dev_get(dev))
5197                 return -EBUSY;
5198
5199         ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5200                                                    master);
5201         if (ret)
5202                 return ret;
5203
5204         /* Now that we linked these devs, make all the upper_dev's
5205          * all_adj_list.upper visible to every dev's all_adj_list.lower an
5206          * versa, and don't forget the devices itself. All of these
5207          * links are non-neighbours.
5208          */
5209         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5210                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5211                         pr_debug("Interlinking %s with %s, non-neighbour\n",
5212                                  i->dev->name, j->dev->name);
5213                         ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5214                         if (ret)
5215                                 goto rollback_mesh;
5216                 }
5217         }
5218
5219         /* add dev to every upper_dev's upper device */
5220         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5221                 pr_debug("linking %s's upper device %s with %s\n",
5222                          upper_dev->name, i->dev->name, dev->name);
5223                 ret = __netdev_adjacent_dev_link(dev, i->dev);
5224                 if (ret)
5225                         goto rollback_upper_mesh;
5226         }
5227
5228         /* add upper_dev to every dev's lower device */
5229         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5230                 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5231                          i->dev->name, upper_dev->name);
5232                 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5233                 if (ret)
5234                         goto rollback_lower_mesh;
5235         }
5236
5237         call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5238         return 0;
5239
5240 rollback_lower_mesh:
5241         to_i = i;
5242         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5243                 if (i == to_i)
5244                         break;
5245                 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5246         }
5247
5248         i = NULL;
5249
5250 rollback_upper_mesh:
5251         to_i = i;
5252         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5253                 if (i == to_i)
5254                         break;
5255                 __netdev_adjacent_dev_unlink(dev, i->dev);
5256         }
5257
5258         i = j = NULL;
5259
5260 rollback_mesh:
5261         to_i = i;
5262         to_j = j;
5263         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5264                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5265                         if (i == to_i && j == to_j)
5266                                 break;
5267                         __netdev_adjacent_dev_unlink(i->dev, j->dev);
5268                 }
5269                 if (i == to_i)
5270                         break;
5271         }
5272
5273         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5274
5275         return ret;
5276 }
5277
5278 /**
5279  * netdev_upper_dev_link - Add a link to the upper device
5280  * @dev: device
5281  * @upper_dev: new upper device
5282  *
5283  * Adds a link to device which is upper to this one. The caller must hold
5284  * the RTNL lock. On a failure a negative errno code is returned.
5285  * On success the reference counts are adjusted and the function
5286  * returns zero.
5287  */
5288 int netdev_upper_dev_link(struct net_device *dev,
5289                           struct net_device *upper_dev)
5290 {
5291         return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
5292 }
5293 EXPORT_SYMBOL(netdev_upper_dev_link);
5294
5295 /**
5296  * netdev_master_upper_dev_link - Add a master link to the upper device
5297  * @dev: device
5298  * @upper_dev: new upper device
5299  *
5300  * Adds a link to device which is upper to this one. In this case, only
5301  * one master upper device can be linked, although other non-master devices
5302  * might be linked as well. The caller must hold the RTNL lock.
5303  * On a failure a negative errno code is returned. On success the reference
5304  * counts are adjusted and the function returns zero.
5305  */
5306 int netdev_master_upper_dev_link(struct net_device *dev,
5307                                  struct net_device *upper_dev)
5308 {
5309         return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
5310 }
5311 EXPORT_SYMBOL(netdev_master_upper_dev_link);
5312
5313 int netdev_master_upper_dev_link_private(struct net_device *dev,
5314                                          struct net_device *upper_dev,
5315                                          void *private)
5316 {
5317         return __netdev_upper_dev_link(dev, upper_dev, true, private);
5318 }
5319 EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5320
5321 /**
5322  * netdev_upper_dev_unlink - Removes a link to upper device
5323  * @dev: device
5324  * @upper_dev: new upper device
5325  *
5326  * Removes a link to device which is upper to this one. The caller must hold
5327  * the RTNL lock.
5328  */
5329 void netdev_upper_dev_unlink(struct net_device *dev,
5330                              struct net_device *upper_dev)
5331 {
5332         struct netdev_adjacent *i, *j;
5333         ASSERT_RTNL();
5334
5335         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5336
5337         /* Here is the tricky part. We must remove all dev's lower
5338          * devices from all upper_dev's upper devices and vice
5339          * versa, to maintain the graph relationship.
5340          */
5341         list_for_each_entry(i, &dev->all_adj_list.lower, list)
5342                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5343                         __netdev_adjacent_dev_unlink(i->dev, j->dev);
5344
5345         /* remove also the devices itself from lower/upper device
5346          * list
5347          */
5348         list_for_each_entry(i, &dev->all_adj_list.lower, list)
5349                 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5350
5351         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5352                 __netdev_adjacent_dev_unlink(dev, i->dev);
5353
5354         call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5355 }
5356 EXPORT_SYMBOL(netdev_upper_dev_unlink);
5357
5358 /**
5359  * netdev_bonding_info_change - Dispatch event about slave change
5360  * @dev: device
5361  * @bonding_info: info to dispatch
5362  *
5363  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5364  * The caller must hold the RTNL lock.
5365  */
5366 void netdev_bonding_info_change(struct net_device *dev,
5367                                 struct netdev_bonding_info *bonding_info)
5368 {
5369         struct netdev_notifier_bonding_info     info;
5370
5371         memcpy(&info.bonding_info, bonding_info,
5372                sizeof(struct netdev_bonding_info));
5373         call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
5374                                       &info.info);
5375 }
5376 EXPORT_SYMBOL(netdev_bonding_info_change);
5377
5378 static void netdev_adjacent_add_links(struct net_device *dev)
5379 {
5380         struct netdev_adjacent *iter;
5381
5382         struct net *net = dev_net(dev);
5383
5384         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5385                 if (!net_eq(net,dev_net(iter->dev)))
5386                         continue;
5387                 netdev_adjacent_sysfs_add(iter->dev, dev,
5388                                           &iter->dev->adj_list.lower);
5389                 netdev_adjacent_sysfs_add(dev, iter->dev,
5390                                           &dev->adj_list.upper);
5391         }
5392
5393         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5394                 if (!net_eq(net,dev_net(iter->dev)))
5395                         continue;
5396                 netdev_adjacent_sysfs_add(iter->dev, dev,
5397                                           &iter->dev->adj_list.upper);
5398                 netdev_adjacent_sysfs_add(dev, iter->dev,
5399                                           &dev->adj_list.lower);
5400         }
5401 }
5402
5403 static void netdev_adjacent_del_links(struct net_device *dev)
5404 {
5405         struct netdev_adjacent *iter;
5406
5407         struct net *net = dev_net(dev);
5408
5409         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5410                 if (!net_eq(net,dev_net(iter->dev)))
5411                         continue;
5412                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5413                                           &iter->dev->adj_list.lower);
5414                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5415                                           &dev->adj_list.upper);
5416         }
5417
5418         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5419                 if (!net_eq(net,dev_net(iter->dev)))
5420                         continue;
5421                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5422                                           &iter->dev->adj_list.upper);
5423                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5424                                           &dev->adj_list.lower);
5425         }
5426 }
5427
5428 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5429 {
5430         struct netdev_adjacent *iter;
5431
5432         struct net *net = dev_net(dev);
5433
5434         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5435                 if (!net_eq(net,dev_net(iter->dev)))
5436                         continue;
5437                 netdev_adjacent_sysfs_del(iter->dev, oldname,
5438                                           &iter->dev->adj_list.lower);
5439                 netdev_adjacent_sysfs_add(iter->dev, dev,
5440                                           &iter->dev->adj_list.lower);
5441         }
5442
5443         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5444                 if (!net_eq(net,dev_net(iter->dev)))
5445                         continue;
5446                 netdev_adjacent_sysfs_del(iter->dev, oldname,
5447                                           &iter->dev->adj_list.upper);
5448                 netdev_adjacent_sysfs_add(iter->dev, dev,
5449                                           &iter->dev->adj_list.upper);
5450         }
5451 }
5452
5453 void *netdev_lower_dev_get_private(struct net_device *dev,
5454                                    struct net_device *lower_dev)
5455 {
5456         struct netdev_adjacent *lower;
5457
5458         if (!lower_dev)
5459                 return NULL;
5460         lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5461         if (!lower)
5462                 return NULL;
5463
5464         return lower->private;
5465 }
5466 EXPORT_SYMBOL(netdev_lower_dev_get_private);
5467
5468
5469 int dev_get_nest_level(struct net_device *dev,
5470                        bool (*type_check)(struct net_device *dev))
5471 {
5472         struct net_device *lower = NULL;
5473         struct list_head *iter;
5474         int max_nest = -1;
5475         int nest;
5476
5477         ASSERT_RTNL();
5478
5479         netdev_for_each_lower_dev(dev, lower, iter) {
5480                 nest = dev_get_nest_level(lower, type_check);
5481                 if (max_nest < nest)
5482                         max_nest = nest;
5483         }
5484
5485         if (type_check(dev))
5486                 max_nest++;
5487
5488         return max_nest;
5489 }
5490 EXPORT_SYMBOL(dev_get_nest_level);
5491
5492 static void dev_change_rx_flags(struct net_device *dev, int flags)
5493 {
5494         const struct net_device_ops *ops = dev->netdev_ops;
5495
5496         if (ops->ndo_change_rx_flags)
5497                 ops->ndo_change_rx_flags(dev, flags);
5498 }
5499
5500 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
5501 {
5502         unsigned int old_flags = dev->flags;
5503         kuid_t uid;
5504         kgid_t gid;
5505
5506         ASSERT_RTNL();
5507
5508         dev->flags |= IFF_PROMISC;
5509         dev->promiscuity += inc;
5510         if (dev->promiscuity == 0) {
5511                 /*
5512                  * Avoid overflow.
5513                  * If inc causes overflow, untouch promisc and return error.
5514                  */
5515                 if (inc < 0)
5516                         dev->flags &= ~IFF_PROMISC;
5517                 else {
5518                         dev->promiscuity -= inc;
5519                         pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5520                                 dev->name);
5521                         return -EOVERFLOW;
5522                 }
5523         }
5524         if (dev->flags != old_flags) {
5525                 pr_info("device %s %s promiscuous mode\n",
5526                         dev->name,
5527                         dev->flags & IFF_PROMISC ? "entered" : "left");
5528                 if (audit_enabled) {
5529                         current_uid_gid(&uid, &gid);
5530                         audit_log(current->audit_context, GFP_ATOMIC,
5531                                 AUDIT_ANOM_PROMISCUOUS,
5532                                 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5533                                 dev->name, (dev->flags & IFF_PROMISC),
5534                                 (old_flags & IFF_PROMISC),
5535                                 from_kuid(&init_user_ns, audit_get_loginuid(current)),
5536                                 from_kuid(&init_user_ns, uid),
5537                                 from_kgid(&init_user_ns, gid),
5538                                 audit_get_sessionid(current));
5539                 }
5540
5541                 dev_change_rx_flags(dev, IFF_PROMISC);
5542         }
5543         if (notify)
5544                 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
5545         return 0;
5546 }
5547
5548 /**
5549  *      dev_set_promiscuity     - update promiscuity count on a device
5550  *      @dev: device
5551  *      @inc: modifier
5552  *
5553  *      Add or remove promiscuity from a device. While the count in the device
5554  *      remains above zero the interface remains promiscuous. Once it hits zero
5555  *      the device reverts back to normal filtering operation. A negative inc
5556  *      value is used to drop promiscuity on the device.
5557  *      Return 0 if successful or a negative errno code on error.
5558  */
5559 int dev_set_promiscuity(struct net_device *dev, int inc)
5560 {
5561         unsigned int old_flags = dev->flags;
5562         int err;
5563
5564         err = __dev_set_promiscuity(dev, inc, true);
5565         if (err < 0)
5566                 return err;
5567         if (dev->flags != old_flags)
5568                 dev_set_rx_mode(dev);
5569         return err;
5570 }
5571 EXPORT_SYMBOL(dev_set_promiscuity);
5572
5573 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
5574 {
5575         unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
5576
5577         ASSERT_RTNL();
5578
5579         dev->flags |= IFF_ALLMULTI;
5580         dev->allmulti += inc;
5581         if (dev->allmulti == 0) {
5582                 /*
5583                  * Avoid overflow.
5584                  * If inc causes overflow, untouch allmulti and return error.
5585                  */
5586                 if (inc < 0)
5587                         dev->flags &= ~IFF_ALLMULTI;
5588                 else {
5589                         dev->allmulti -= inc;
5590                         pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5591                                 dev->name);
5592                         return -EOVERFLOW;
5593                 }
5594         }
5595         if (dev->flags ^ old_flags) {
5596                 dev_change_rx_flags(dev, IFF_ALLMULTI);
5597                 dev_set_rx_mode(dev);
5598                 if (notify)
5599                         __dev_notify_flags(dev, old_flags,
5600                                            dev->gflags ^ old_gflags);
5601         }
5602         return 0;
5603 }
5604
5605 /**
5606  *      dev_set_allmulti        - update allmulti count on a device
5607  *      @dev: device
5608  *      @inc: modifier
5609  *
5610  *      Add or remove reception of all multicast frames to a device. While the
5611  *      count in the device remains above zero the interface remains listening
5612  *      to all interfaces. Once it hits zero the device reverts back to normal
5613  *      filtering operation. A negative @inc value is used to drop the counter
5614  *      when releasing a resource needing all multicasts.
5615  *      Return 0 if successful or a negative errno code on error.
5616  */
5617
5618 int dev_set_allmulti(struct net_device *dev, int inc)
5619 {
5620         return __dev_set_allmulti(dev, inc, true);
5621 }
5622 EXPORT_SYMBOL(dev_set_allmulti);
5623
5624 /*
5625  *      Upload unicast and multicast address lists to device and
5626  *      configure RX filtering. When the device doesn't support unicast
5627  *      filtering it is put in promiscuous mode while unicast addresses
5628  *      are present.
5629  */
5630 void __dev_set_rx_mode(struct net_device *dev)
5631 {
5632         const struct net_device_ops *ops = dev->netdev_ops;
5633
5634         /* dev_open will call this function so the list will stay sane. */
5635         if (!(dev->flags&IFF_UP))
5636                 return;
5637
5638         if (!netif_device_present(dev))
5639                 return;
5640
5641         if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
5642                 /* Unicast addresses changes may only happen under the rtnl,
5643                  * therefore calling __dev_set_promiscuity here is safe.
5644                  */
5645                 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
5646                         __dev_set_promiscuity(dev, 1, false);
5647                         dev->uc_promisc = true;
5648                 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
5649                         __dev_set_promiscuity(dev, -1, false);
5650                         dev->uc_promisc = false;
5651                 }
5652         }
5653
5654         if (ops->ndo_set_rx_mode)
5655                 ops->ndo_set_rx_mode(dev);
5656 }
5657
5658 void dev_set_rx_mode(struct net_device *dev)
5659 {
5660         netif_addr_lock_bh(dev);
5661         __dev_set_rx_mode(dev);
5662         netif_addr_unlock_bh(dev);
5663 }
5664
5665 /**
5666  *      dev_get_flags - get flags reported to userspace
5667  *      @dev: device
5668  *
5669  *      Get the combination of flag bits exported through APIs to userspace.
5670  */
5671 unsigned int dev_get_flags(const struct net_device *dev)
5672 {
5673         unsigned int flags;
5674
5675         flags = (dev->flags & ~(IFF_PROMISC |
5676                                 IFF_ALLMULTI |
5677                                 IFF_RUNNING |
5678                                 IFF_LOWER_UP |
5679                                 IFF_DORMANT)) |
5680                 (dev->gflags & (IFF_PROMISC |
5681                                 IFF_ALLMULTI));
5682
5683         if (netif_running(dev)) {
5684                 if (netif_oper_up(dev))
5685                         flags |= IFF_RUNNING;
5686                 if (netif_carrier_ok(dev))
5687                         flags |= IFF_LOWER_UP;
5688                 if (netif_dormant(dev))
5689                         flags |= IFF_DORMANT;
5690         }
5691
5692         return flags;
5693 }
5694 EXPORT_SYMBOL(dev_get_flags);
5695
5696 int __dev_change_flags(struct net_device *dev, unsigned int flags)
5697 {
5698         unsigned int old_flags = dev->flags;
5699         int ret;
5700
5701         ASSERT_RTNL();
5702
5703         /*
5704          *      Set the flags on our device.
5705          */
5706
5707         dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5708                                IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5709                                IFF_AUTOMEDIA)) |
5710                      (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5711                                     IFF_ALLMULTI));
5712
5713         /*
5714          *      Load in the correct multicast list now the flags have changed.
5715          */
5716
5717         if ((old_flags ^ flags) & IFF_MULTICAST)
5718                 dev_change_rx_flags(dev, IFF_MULTICAST);
5719
5720         dev_set_rx_mode(dev);
5721
5722         /*
5723          *      Have we downed the interface. We handle IFF_UP ourselves
5724          *      according to user attempts to set it, rather than blindly
5725          *      setting it.
5726          */
5727
5728         ret = 0;
5729         if ((old_flags ^ flags) & IFF_UP)
5730                 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
5731
5732         if ((flags ^ dev->gflags) & IFF_PROMISC) {
5733                 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5734                 unsigned int old_flags = dev->flags;
5735
5736                 dev->gflags ^= IFF_PROMISC;
5737
5738                 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5739                         if (dev->flags != old_flags)
5740                                 dev_set_rx_mode(dev);
5741         }
5742
5743         /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5744            is important. Some (broken) drivers set IFF_PROMISC, when
5745            IFF_ALLMULTI is requested not asking us and not reporting.
5746          */
5747         if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
5748                 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5749
5750                 dev->gflags ^= IFF_ALLMULTI;
5751                 __dev_set_allmulti(dev, inc, false);
5752         }
5753
5754         return ret;
5755 }
5756
5757 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5758                         unsigned int gchanges)
5759 {
5760         unsigned int changes = dev->flags ^ old_flags;
5761
5762         if (gchanges)
5763                 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
5764
5765         if (changes & IFF_UP) {
5766                 if (dev->flags & IFF_UP)
5767                         call_netdevice_notifiers(NETDEV_UP, dev);
5768                 else
5769                         call_netdevice_notifiers(NETDEV_DOWN, dev);
5770         }
5771
5772         if (dev->flags & IFF_UP &&
5773             (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5774                 struct netdev_notifier_change_info change_info;
5775
5776                 change_info.flags_changed = changes;
5777                 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5778                                               &change_info.info);
5779         }
5780 }
5781
5782 /**
5783  *      dev_change_flags - change device settings
5784  *      @dev: device
5785  *      @flags: device state flags
5786  *
5787  *      Change settings on device based state flags. The flags are
5788  *      in the userspace exported format.
5789  */
5790 int dev_change_flags(struct net_device *dev, unsigned int flags)
5791 {
5792         int ret;
5793         unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
5794
5795         ret = __dev_change_flags(dev, flags);
5796         if (ret < 0)
5797                 return ret;
5798
5799         changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
5800         __dev_notify_flags(dev, old_flags, changes);
5801         return ret;
5802 }
5803 EXPORT_SYMBOL(dev_change_flags);
5804
5805 static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5806 {
5807         const struct net_device_ops *ops = dev->netdev_ops;
5808
5809         if (ops->ndo_change_mtu)
5810                 return ops->ndo_change_mtu(dev, new_mtu);
5811
5812         dev->mtu = new_mtu;
5813         return 0;
5814 }
5815
5816 /**
5817  *      dev_set_mtu - Change maximum transfer unit
5818  *      @dev: device
5819  *      @new_mtu: new transfer unit
5820  *
5821  *      Change the maximum transfer size of the network device.
5822  */
5823 int dev_set_mtu(struct net_device *dev, int new_mtu)
5824 {
5825         int err, orig_mtu;
5826
5827         if (new_mtu == dev->mtu)
5828                 return 0;
5829
5830         /*      MTU must be positive.    */
5831         if (new_mtu < 0)
5832                 return -EINVAL;
5833
5834         if (!netif_device_present(dev))
5835                 return -ENODEV;
5836
5837         err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5838         err = notifier_to_errno(err);
5839         if (err)
5840                 return err;
5841
5842         orig_mtu = dev->mtu;
5843         err = __dev_set_mtu(dev, new_mtu);
5844
5845         if (!err) {
5846                 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5847                 err = notifier_to_errno(err);
5848                 if (err) {
5849                         /* setting mtu back and notifying everyone again,
5850                          * so that they have a chance to revert changes.
5851                          */
5852                         __dev_set_mtu(dev, orig_mtu);
5853                         call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5854                 }
5855         }
5856         return err;
5857 }
5858 EXPORT_SYMBOL(dev_set_mtu);
5859
5860 /**
5861  *      dev_set_group - Change group this device belongs to
5862  *      @dev: device
5863  *      @new_group: group this device should belong to
5864  */
5865 void dev_set_group(struct net_device *dev, int new_group)
5866 {
5867         dev->group = new_group;
5868 }
5869 EXPORT_SYMBOL(dev_set_group);
5870
5871 /**
5872  *      dev_set_mac_address - Change Media Access Control Address
5873  *      @dev: device
5874  *      @sa: new address
5875  *
5876  *      Change the hardware (MAC) address of the device
5877  */
5878 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5879 {
5880         const struct net_device_ops *ops = dev->netdev_ops;
5881         int err;
5882
5883         if (!ops->ndo_set_mac_address)
5884                 return -EOPNOTSUPP;
5885         if (sa->sa_family != dev->type)
5886                 return -EINVAL;
5887         if (!netif_device_present(dev))
5888                 return -ENODEV;
5889         err = ops->ndo_set_mac_address(dev, sa);
5890         if (err)
5891                 return err;
5892         dev->addr_assign_type = NET_ADDR_SET;
5893         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5894         add_device_randomness(dev->dev_addr, dev->addr_len);
5895         return 0;
5896 }
5897 EXPORT_SYMBOL(dev_set_mac_address);
5898
5899 /**
5900  *      dev_change_carrier - Change device carrier
5901  *      @dev: device
5902  *      @new_carrier: new value
5903  *
5904  *      Change device carrier
5905  */
5906 int dev_change_carrier(struct net_device *dev, bool new_carrier)
5907 {
5908         const struct net_device_ops *ops = dev->netdev_ops;
5909
5910         if (!ops->ndo_change_carrier)
5911                 return -EOPNOTSUPP;
5912         if (!netif_device_present(dev))
5913                 return -ENODEV;
5914         return ops->ndo_change_carrier(dev, new_carrier);
5915 }
5916 EXPORT_SYMBOL(dev_change_carrier);
5917
5918 /**
5919  *      dev_get_phys_port_id - Get device physical port ID
5920  *      @dev: device
5921  *      @ppid: port ID
5922  *
5923  *      Get device physical port ID
5924  */
5925 int dev_get_phys_port_id(struct net_device *dev,
5926                          struct netdev_phys_item_id *ppid)
5927 {
5928         const struct net_device_ops *ops = dev->netdev_ops;
5929
5930         if (!ops->ndo_get_phys_port_id)
5931                 return -EOPNOTSUPP;
5932         return ops->ndo_get_phys_port_id(dev, ppid);
5933 }
5934 EXPORT_SYMBOL(dev_get_phys_port_id);
5935
5936 /**
5937  *      dev_get_phys_port_name - Get device physical port name
5938  *      @dev: device
5939  *      @name: port name
5940  *
5941  *      Get device physical port name
5942  */
5943 int dev_get_phys_port_name(struct net_device *dev,
5944                            char *name, size_t len)
5945 {
5946         const struct net_device_ops *ops = dev->netdev_ops;
5947
5948         if (!ops->ndo_get_phys_port_name)
5949                 return -EOPNOTSUPP;
5950         return ops->ndo_get_phys_port_name(dev, name, len);
5951 }
5952 EXPORT_SYMBOL(dev_get_phys_port_name);
5953
5954 /**
5955  *      dev_new_index   -       allocate an ifindex
5956  *      @net: the applicable net namespace
5957  *
5958  *      Returns a suitable unique value for a new device interface
5959  *      number.  The caller must hold the rtnl semaphore or the
5960  *      dev_base_lock to be sure it remains unique.
5961  */
5962 static int dev_new_index(struct net *net)
5963 {
5964         int ifindex = net->ifindex;
5965         for (;;) {
5966                 if (++ifindex <= 0)
5967                         ifindex = 1;
5968                 if (!__dev_get_by_index(net, ifindex))
5969                         return net->ifindex = ifindex;
5970         }
5971 }
5972
5973 /* Delayed registration/unregisteration */
5974 static LIST_HEAD(net_todo_list);
5975 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5976
5977 static void net_set_todo(struct net_device *dev)
5978 {
5979         list_add_tail(&dev->todo_list, &net_todo_list);
5980         dev_net(dev)->dev_unreg_count++;
5981 }
5982
5983 static void rollback_registered_many(struct list_head *head)
5984 {
5985         struct net_device *dev, *tmp;
5986         LIST_HEAD(close_head);
5987
5988         BUG_ON(dev_boot_phase);
5989         ASSERT_RTNL();
5990
5991         list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5992                 /* Some devices call without registering
5993                  * for initialization unwind. Remove those
5994                  * devices and proceed with the remaining.
5995                  */
5996                 if (dev->reg_state == NETREG_UNINITIALIZED) {
5997                         pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5998                                  dev->name, dev);
5999
6000                         WARN_ON(1);
6001                         list_del(&dev->unreg_list);
6002                         continue;
6003                 }
6004                 dev->dismantle = true;
6005                 BUG_ON(dev->reg_state != NETREG_REGISTERED);
6006         }
6007
6008         /* If device is running, close it first. */
6009         list_for_each_entry(dev, head, unreg_list)
6010                 list_add_tail(&dev->close_list, &close_head);
6011         dev_close_many(&close_head, true);
6012
6013         list_for_each_entry(dev, head, unreg_list) {
6014                 /* And unlink it from device chain. */
6015                 unlist_netdevice(dev);
6016
6017                 dev->reg_state = NETREG_UNREGISTERING;
6018         }
6019
6020         synchronize_net();
6021
6022         list_for_each_entry(dev, head, unreg_list) {
6023                 struct sk_buff *skb = NULL;
6024
6025                 /* Shutdown queueing discipline. */
6026                 dev_shutdown(dev);
6027
6028
6029                 /* Notify protocols, that we are about to destroy
6030                    this device. They should clean all the things.
6031                 */
6032                 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6033
6034                 if (!dev->rtnl_link_ops ||
6035                     dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6036                         skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6037                                                      GFP_KERNEL);
6038
6039                 /*
6040                  *      Flush the unicast and multicast chains
6041                  */
6042                 dev_uc_flush(dev);
6043                 dev_mc_flush(dev);
6044
6045                 if (dev->netdev_ops->ndo_uninit)
6046                         dev->netdev_ops->ndo_uninit(dev);
6047
6048                 if (skb)
6049                         rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
6050
6051                 /* Notifier chain MUST detach us all upper devices. */
6052                 WARN_ON(netdev_has_any_upper_dev(dev));
6053
6054                 /* Remove entries from kobject tree */
6055                 netdev_unregister_kobject(dev);
6056 #ifdef CONFIG_XPS
6057                 /* Remove XPS queueing entries */
6058                 netif_reset_xps_queues_gt(dev, 0);
6059 #endif
6060         }
6061
6062         synchronize_net();
6063
6064         list_for_each_entry(dev, head, unreg_list)
6065                 dev_put(dev);
6066 }
6067
6068 static void rollback_registered(struct net_device *dev)
6069 {
6070         LIST_HEAD(single);
6071
6072         list_add(&dev->unreg_list, &single);
6073         rollback_registered_many(&single);
6074         list_del(&single);
6075 }
6076
6077 static netdev_features_t netdev_fix_features(struct net_device *dev,
6078         netdev_features_t features)
6079 {
6080         /* Fix illegal checksum combinations */
6081         if ((features & NETIF_F_HW_CSUM) &&
6082             (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6083                 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
6084                 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6085         }
6086
6087         /* TSO requires that SG is present as well. */
6088         if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6089                 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
6090                 features &= ~NETIF_F_ALL_TSO;
6091         }
6092
6093         if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6094                                         !(features & NETIF_F_IP_CSUM)) {
6095                 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6096                 features &= ~NETIF_F_TSO;
6097                 features &= ~NETIF_F_TSO_ECN;
6098         }
6099
6100         if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6101                                          !(features & NETIF_F_IPV6_CSUM)) {
6102                 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6103                 features &= ~NETIF_F_TSO6;
6104         }
6105
6106         /* TSO ECN requires that TSO is present as well. */
6107         if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6108                 features &= ~NETIF_F_TSO_ECN;
6109
6110         /* Software GSO depends on SG. */
6111         if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6112                 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
6113                 features &= ~NETIF_F_GSO;
6114         }
6115
6116         /* UFO needs SG and checksumming */
6117         if (features & NETIF_F_UFO) {
6118                 /* maybe split UFO into V4 and V6? */
6119                 if (!((features & NETIF_F_GEN_CSUM) ||
6120                     (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
6121                             == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6122                         netdev_dbg(dev,
6123                                 "Dropping NETIF_F_UFO since no checksum offload features.\n");
6124                         features &= ~NETIF_F_UFO;
6125                 }
6126
6127                 if (!(features & NETIF_F_SG)) {
6128                         netdev_dbg(dev,
6129                                 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
6130                         features &= ~NETIF_F_UFO;
6131                 }
6132         }
6133
6134 #ifdef CONFIG_NET_RX_BUSY_POLL
6135         if (dev->netdev_ops->ndo_busy_poll)
6136                 features |= NETIF_F_BUSY_POLL;
6137         else
6138 #endif
6139                 features &= ~NETIF_F_BUSY_POLL;
6140
6141         return features;
6142 }
6143
6144 int __netdev_update_features(struct net_device *dev)
6145 {
6146         netdev_features_t features;
6147         int err = 0;
6148
6149         ASSERT_RTNL();
6150
6151         features = netdev_get_wanted_features(dev);
6152
6153         if (dev->netdev_ops->ndo_fix_features)
6154                 features = dev->netdev_ops->ndo_fix_features(dev, features);
6155
6156         /* driver might be less strict about feature dependencies */
6157         features = netdev_fix_features(dev, features);
6158
6159         if (dev->features == features)
6160                 return 0;
6161
6162         netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6163                 &dev->features, &features);
6164
6165         if (dev->netdev_ops->ndo_set_features)
6166                 err = dev->netdev_ops->ndo_set_features(dev, features);
6167
6168         if (unlikely(err < 0)) {
6169                 netdev_err(dev,
6170                         "set_features() failed (%d); wanted %pNF, left %pNF\n",
6171                         err, &features, &dev->features);
6172                 return -1;
6173         }
6174
6175         if (!err)
6176                 dev->features = features;
6177
6178         return 1;
6179 }
6180
6181 /**
6182  *      netdev_update_features - recalculate device features
6183  *      @dev: the device to check
6184  *
6185  *      Recalculate dev->features set and send notifications if it
6186  *      has changed. Should be called after driver or hardware dependent
6187  *      conditions might have changed that influence the features.
6188  */
6189 void netdev_update_features(struct net_device *dev)
6190 {
6191         if (__netdev_update_features(dev))
6192                 netdev_features_change(dev);
6193 }
6194 EXPORT_SYMBOL(netdev_update_features);
6195
6196 /**
6197  *      netdev_change_features - recalculate device features
6198  *      @dev: the device to check
6199  *
6200  *      Recalculate dev->features set and send notifications even
6201  *      if they have not changed. Should be called instead of
6202  *      netdev_update_features() if also dev->vlan_features might
6203  *      have changed to allow the changes to be propagated to stacked
6204  *      VLAN devices.
6205  */
6206 void netdev_change_features(struct net_device *dev)
6207 {
6208         __netdev_update_features(dev);
6209         netdev_features_change(dev);
6210 }
6211 EXPORT_SYMBOL(netdev_change_features);
6212
6213 /**
6214  *      netif_stacked_transfer_operstate -      transfer operstate
6215  *      @rootdev: the root or lower level device to transfer state from
6216  *      @dev: the device to transfer operstate to
6217  *
6218  *      Transfer operational state from root to device. This is normally
6219  *      called when a stacking relationship exists between the root
6220  *      device and the device(a leaf device).
6221  */
6222 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6223                                         struct net_device *dev)
6224 {
6225         if (rootdev->operstate == IF_OPER_DORMANT)
6226                 netif_dormant_on(dev);
6227         else
6228                 netif_dormant_off(dev);
6229
6230         if (netif_carrier_ok(rootdev)) {
6231                 if (!netif_carrier_ok(dev))
6232                         netif_carrier_on(dev);
6233         } else {
6234                 if (netif_carrier_ok(dev))
6235                         netif_carrier_off(dev);
6236         }
6237 }
6238 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6239
6240 #ifdef CONFIG_SYSFS
6241 static int netif_alloc_rx_queues(struct net_device *dev)
6242 {
6243         unsigned int i, count = dev->num_rx_queues;
6244         struct netdev_rx_queue *rx;
6245         size_t sz = count * sizeof(*rx);
6246
6247         BUG_ON(count < 1);
6248
6249         rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6250         if (!rx) {
6251                 rx = vzalloc(sz);
6252                 if (!rx)
6253                         return -ENOMEM;
6254         }
6255         dev->_rx = rx;
6256
6257         for (i = 0; i < count; i++)
6258                 rx[i].dev = dev;
6259         return 0;
6260 }
6261 #endif
6262
6263 static void netdev_init_one_queue(struct net_device *dev,
6264                                   struct netdev_queue *queue, void *_unused)
6265 {
6266         /* Initialize queue lock */
6267         spin_lock_init(&queue->_xmit_lock);
6268         netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6269         queue->xmit_lock_owner = -1;
6270         netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
6271         queue->dev = dev;
6272 #ifdef CONFIG_BQL
6273         dql_init(&queue->dql, HZ);
6274 #endif
6275 }
6276
6277 static void netif_free_tx_queues(struct net_device *dev)
6278 {
6279         kvfree(dev->_tx);
6280 }
6281
6282 static int netif_alloc_netdev_queues(struct net_device *dev)
6283 {
6284         unsigned int count = dev->num_tx_queues;
6285         struct netdev_queue *tx;
6286         size_t sz = count * sizeof(*tx);
6287
6288         BUG_ON(count < 1 || count > 0xffff);
6289
6290         tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6291         if (!tx) {
6292                 tx = vzalloc(sz);
6293                 if (!tx)
6294                         return -ENOMEM;
6295         }
6296         dev->_tx = tx;
6297
6298         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6299         spin_lock_init(&dev->tx_global_lock);
6300
6301         return 0;
6302 }
6303
6304 void netif_tx_stop_all_queues(struct net_device *dev)
6305 {
6306         unsigned int i;
6307
6308         for (i = 0; i < dev->num_tx_queues; i++) {
6309                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
6310                 netif_tx_stop_queue(txq);
6311         }
6312 }
6313 EXPORT_SYMBOL(netif_tx_stop_all_queues);
6314
6315 /**
6316  *      register_netdevice      - register a network device
6317  *      @dev: device to register
6318  *
6319  *      Take a completed network device structure and add it to the kernel
6320  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6321  *      chain. 0 is returned on success. A negative errno code is returned
6322  *      on a failure to set up the device, or if the name is a duplicate.
6323  *
6324  *      Callers must hold the rtnl semaphore. You may want
6325  *      register_netdev() instead of this.
6326  *
6327  *      BUGS:
6328  *      The locking appears insufficient to guarantee two parallel registers
6329  *      will not get the same name.
6330  */
6331
6332 int register_netdevice(struct net_device *dev)
6333 {
6334         int ret;
6335         struct net *net = dev_net(dev);
6336
6337         BUG_ON(dev_boot_phase);
6338         ASSERT_RTNL();
6339
6340         might_sleep();
6341
6342         /* When net_device's are persistent, this will be fatal. */
6343         BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
6344         BUG_ON(!net);
6345
6346         spin_lock_init(&dev->addr_list_lock);
6347         netdev_set_addr_lockdep_class(dev);
6348
6349         ret = dev_get_valid_name(net, dev, dev->name);
6350         if (ret < 0)
6351                 goto out;
6352
6353         /* Init, if this function is available */
6354         if (dev->netdev_ops->ndo_init) {
6355                 ret = dev->netdev_ops->ndo_init(dev);
6356                 if (ret) {
6357                         if (ret > 0)
6358                                 ret = -EIO;
6359                         goto out;
6360                 }
6361         }
6362
6363         if (((dev->hw_features | dev->features) &
6364              NETIF_F_HW_VLAN_CTAG_FILTER) &&
6365             (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6366              !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6367                 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6368                 ret = -EINVAL;
6369                 goto err_uninit;
6370         }
6371
6372         ret = -EBUSY;
6373         if (!dev->ifindex)
6374                 dev->ifindex = dev_new_index(net);
6375         else if (__dev_get_by_index(net, dev->ifindex))
6376                 goto err_uninit;
6377
6378         /* Transfer changeable features to wanted_features and enable
6379          * software offloads (GSO and GRO).
6380          */
6381         dev->hw_features |= NETIF_F_SOFT_FEATURES;
6382         dev->features |= NETIF_F_SOFT_FEATURES;
6383         dev->wanted_features = dev->features & dev->hw_features;
6384
6385         if (!(dev->flags & IFF_LOOPBACK)) {
6386                 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6387         }
6388
6389         /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6390          */
6391         dev->vlan_features |= NETIF_F_HIGHDMA;
6392
6393         /* Make NETIF_F_SG inheritable to tunnel devices.
6394          */
6395         dev->hw_enc_features |= NETIF_F_SG;
6396
6397         /* Make NETIF_F_SG inheritable to MPLS.
6398          */
6399         dev->mpls_features |= NETIF_F_SG;
6400
6401         ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6402         ret = notifier_to_errno(ret);
6403         if (ret)
6404                 goto err_uninit;
6405
6406         ret = netdev_register_kobject(dev);
6407         if (ret)
6408                 goto err_uninit;
6409         dev->reg_state = NETREG_REGISTERED;
6410
6411         __netdev_update_features(dev);
6412
6413         /*
6414          *      Default initial state at registry is that the
6415          *      device is present.
6416          */
6417
6418         set_bit(__LINK_STATE_PRESENT, &dev->state);
6419
6420         linkwatch_init_dev(dev);
6421
6422         dev_init_scheduler(dev);
6423         dev_hold(dev);
6424         list_netdevice(dev);
6425         add_device_randomness(dev->dev_addr, dev->addr_len);
6426
6427         /* If the device has permanent device address, driver should
6428          * set dev_addr and also addr_assign_type should be set to
6429          * NET_ADDR_PERM (default value).
6430          */
6431         if (dev->addr_assign_type == NET_ADDR_PERM)
6432                 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6433
6434         /* Notify protocols, that a new device appeared. */
6435         ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
6436         ret = notifier_to_errno(ret);
6437         if (ret) {
6438                 rollback_registered(dev);
6439                 dev->reg_state = NETREG_UNREGISTERED;
6440         }
6441         /*
6442          *      Prevent userspace races by waiting until the network
6443          *      device is fully setup before sending notifications.
6444          */
6445         if (!dev->rtnl_link_ops ||
6446             dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6447                 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6448
6449 out:
6450         return ret;
6451
6452 err_uninit:
6453         if (dev->netdev_ops->ndo_uninit)
6454                 dev->netdev_ops->ndo_uninit(dev);
6455         goto out;
6456 }
6457 EXPORT_SYMBOL(register_netdevice);
6458
6459 /**
6460  *      init_dummy_netdev       - init a dummy network device for NAPI
6461  *      @dev: device to init
6462  *
6463  *      This takes a network device structure and initialize the minimum
6464  *      amount of fields so it can be used to schedule NAPI polls without
6465  *      registering a full blown interface. This is to be used by drivers
6466  *      that need to tie several hardware interfaces to a single NAPI
6467  *      poll scheduler due to HW limitations.
6468  */
6469 int init_dummy_netdev(struct net_device *dev)
6470 {
6471         /* Clear everything. Note we don't initialize spinlocks
6472          * are they aren't supposed to be taken by any of the
6473          * NAPI code and this dummy netdev is supposed to be
6474          * only ever used for NAPI polls
6475          */
6476         memset(dev, 0, sizeof(struct net_device));
6477
6478         /* make sure we BUG if trying to hit standard
6479          * register/unregister code path
6480          */
6481         dev->reg_state = NETREG_DUMMY;
6482
6483         /* NAPI wants this */
6484         INIT_LIST_HEAD(&dev->napi_list);
6485
6486         /* a dummy interface is started by default */
6487         set_bit(__LINK_STATE_PRESENT, &dev->state);
6488         set_bit(__LINK_STATE_START, &dev->state);
6489
6490         /* Note : We dont allocate pcpu_refcnt for dummy devices,
6491          * because users of this 'device' dont need to change
6492          * its refcount.
6493          */
6494
6495         return 0;
6496 }
6497 EXPORT_SYMBOL_GPL(init_dummy_netdev);
6498
6499
6500 /**
6501  *      register_netdev - register a network device
6502  *      @dev: device to register
6503  *
6504  *      Take a completed network device structure and add it to the kernel
6505  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6506  *      chain. 0 is returned on success. A negative errno code is returned
6507  *      on a failure to set up the device, or if the name is a duplicate.
6508  *
6509  *      This is a wrapper around register_netdevice that takes the rtnl semaphore
6510  *      and expands the device name if you passed a format string to
6511  *      alloc_netdev.
6512  */
6513 int register_netdev(struct net_device *dev)
6514 {
6515         int err;
6516
6517         rtnl_lock();
6518         err = register_netdevice(dev);
6519         rtnl_unlock();
6520         return err;
6521 }
6522 EXPORT_SYMBOL(register_netdev);
6523
6524 int netdev_refcnt_read(const struct net_device *dev)
6525 {
6526         int i, refcnt = 0;
6527
6528         for_each_possible_cpu(i)
6529                 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6530         return refcnt;
6531 }
6532 EXPORT_SYMBOL(netdev_refcnt_read);
6533
6534 /**
6535  * netdev_wait_allrefs - wait until all references are gone.
6536  * @dev: target net_device
6537  *
6538  * This is called when unregistering network devices.
6539  *
6540  * Any protocol or device that holds a reference should register
6541  * for netdevice notification, and cleanup and put back the
6542  * reference if they receive an UNREGISTER event.
6543  * We can get stuck here if buggy protocols don't correctly
6544  * call dev_put.
6545  */
6546 static void netdev_wait_allrefs(struct net_device *dev)
6547 {
6548         unsigned long rebroadcast_time, warning_time;
6549         int refcnt;
6550
6551         linkwatch_forget_dev(dev);
6552
6553         rebroadcast_time = warning_time = jiffies;
6554         refcnt = netdev_refcnt_read(dev);
6555
6556         while (refcnt != 0) {
6557                 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6558                         rtnl_lock();
6559
6560                         /* Rebroadcast unregister notification */
6561                         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6562
6563                         __rtnl_unlock();
6564                         rcu_barrier();
6565                         rtnl_lock();
6566
6567                         call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6568                         if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6569                                      &dev->state)) {
6570                                 /* We must not have linkwatch events
6571                                  * pending on unregister. If this
6572                                  * happens, we simply run the queue
6573                                  * unscheduled, resulting in a noop
6574                                  * for this device.
6575                                  */
6576                                 linkwatch_run_queue();
6577                         }
6578
6579                         __rtnl_unlock();
6580
6581                         rebroadcast_time = jiffies;
6582                 }
6583
6584                 msleep(250);
6585
6586                 refcnt = netdev_refcnt_read(dev);
6587
6588                 if (time_after(jiffies, warning_time + 10 * HZ)) {
6589                         pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6590                                  dev->name, refcnt);
6591                         warning_time = jiffies;
6592                 }
6593         }
6594 }
6595
6596 /* The sequence is:
6597  *
6598  *      rtnl_lock();
6599  *      ...
6600  *      register_netdevice(x1);
6601  *      register_netdevice(x2);
6602  *      ...
6603  *      unregister_netdevice(y1);
6604  *      unregister_netdevice(y2);
6605  *      ...
6606  *      rtnl_unlock();
6607  *      free_netdev(y1);
6608  *      free_netdev(y2);
6609  *
6610  * We are invoked by rtnl_unlock().
6611  * This allows us to deal with problems:
6612  * 1) We can delete sysfs objects which invoke hotplug
6613  *    without deadlocking with linkwatch via keventd.
6614  * 2) Since we run with the RTNL semaphore not held, we can sleep
6615  *    safely in order to wait for the netdev refcnt to drop to zero.
6616  *
6617  * We must not return until all unregister events added during
6618  * the interval the lock was held have been completed.
6619  */
6620 void netdev_run_todo(void)
6621 {
6622         struct list_head list;
6623
6624         /* Snapshot list, allow later requests */
6625         list_replace_init(&net_todo_list, &list);
6626
6627         __rtnl_unlock();
6628
6629
6630         /* Wait for rcu callbacks to finish before next phase */
6631         if (!list_empty(&list))
6632                 rcu_barrier();
6633
6634         while (!list_empty(&list)) {
6635                 struct net_device *dev
6636                         = list_first_entry(&list, struct net_device, todo_list);
6637                 list_del(&dev->todo_list);
6638
6639                 rtnl_lock();
6640                 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6641                 __rtnl_unlock();
6642
6643                 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
6644                         pr_err("network todo '%s' but state %d\n",
6645                                dev->name, dev->reg_state);
6646                         dump_stack();
6647                         continue;
6648                 }
6649
6650                 dev->reg_state = NETREG_UNREGISTERED;
6651
6652                 on_each_cpu(flush_backlog, dev, 1);
6653
6654                 netdev_wait_allrefs(dev);
6655
6656                 /* paranoia */
6657                 BUG_ON(netdev_refcnt_read(dev));
6658                 BUG_ON(!list_empty(&dev->ptype_all));
6659                 BUG_ON(!list_empty(&dev->ptype_specific));
6660                 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6661                 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
6662                 WARN_ON(dev->dn_ptr);
6663
6664                 if (dev->destructor)
6665                         dev->destructor(dev);
6666
6667                 /* Report a network device has been unregistered */
6668                 rtnl_lock();
6669                 dev_net(dev)->dev_unreg_count--;
6670                 __rtnl_unlock();
6671                 wake_up(&netdev_unregistering_wq);
6672
6673                 /* Free network device */
6674                 kobject_put(&dev->dev.kobj);
6675         }
6676 }
6677
6678 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
6679  * fields in the same order, with only the type differing.
6680  */
6681 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6682                              const struct net_device_stats *netdev_stats)
6683 {
6684 #if BITS_PER_LONG == 64
6685         BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6686         memcpy(stats64, netdev_stats, sizeof(*stats64));
6687 #else
6688         size_t i, n = sizeof(*stats64) / sizeof(u64);
6689         const unsigned long *src = (const unsigned long *)netdev_stats;
6690         u64 *dst = (u64 *)stats64;
6691
6692         BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6693                      sizeof(*stats64) / sizeof(u64));
6694         for (i = 0; i < n; i++)
6695                 dst[i] = src[i];
6696 #endif
6697 }
6698 EXPORT_SYMBOL(netdev_stats_to_stats64);
6699
6700 /**
6701  *      dev_get_stats   - get network device statistics
6702  *      @dev: device to get statistics from
6703  *      @storage: place to store stats
6704  *
6705  *      Get network statistics from device. Return @storage.
6706  *      The device driver may provide its own method by setting
6707  *      dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6708  *      otherwise the internal statistics structure is used.
6709  */
6710 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6711                                         struct rtnl_link_stats64 *storage)
6712 {
6713         const struct net_device_ops *ops = dev->netdev_ops;
6714
6715         if (ops->ndo_get_stats64) {
6716                 memset(storage, 0, sizeof(*storage));
6717                 ops->ndo_get_stats64(dev, storage);
6718         } else if (ops->ndo_get_stats) {
6719                 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
6720         } else {
6721                 netdev_stats_to_stats64(storage, &dev->stats);
6722         }
6723         storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
6724         storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
6725         return storage;
6726 }
6727 EXPORT_SYMBOL(dev_get_stats);
6728
6729 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6730 {
6731         struct netdev_queue *queue = dev_ingress_queue(dev);
6732
6733 #ifdef CONFIG_NET_CLS_ACT
6734         if (queue)
6735                 return queue;
6736         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6737         if (!queue)
6738                 return NULL;
6739         netdev_init_one_queue(dev, queue, NULL);
6740         RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
6741         queue->qdisc_sleeping = &noop_qdisc;
6742         rcu_assign_pointer(dev->ingress_queue, queue);
6743 #endif
6744         return queue;
6745 }
6746
6747 static const struct ethtool_ops default_ethtool_ops;
6748
6749 void netdev_set_default_ethtool_ops(struct net_device *dev,
6750                                     const struct ethtool_ops *ops)
6751 {
6752         if (dev->ethtool_ops == &default_ethtool_ops)
6753                 dev->ethtool_ops = ops;
6754 }
6755 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6756
6757 void netdev_freemem(struct net_device *dev)
6758 {
6759         char *addr = (char *)dev - dev->padded;
6760
6761         kvfree(addr);
6762 }
6763
6764 /**
6765  *      alloc_netdev_mqs - allocate network device
6766  *      @sizeof_priv:           size of private data to allocate space for
6767  *      @name:                  device name format string
6768  *      @name_assign_type:      origin of device name
6769  *      @setup:                 callback to initialize device
6770  *      @txqs:                  the number of TX subqueues to allocate
6771  *      @rxqs:                  the number of RX subqueues to allocate
6772  *
6773  *      Allocates a struct net_device with private data area for driver use
6774  *      and performs basic initialization.  Also allocates subqueue structs
6775  *      for each queue on the device.
6776  */
6777 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6778                 unsigned char name_assign_type,
6779                 void (*setup)(struct net_device *),
6780                 unsigned int txqs, unsigned int rxqs)
6781 {
6782         struct net_device *dev;
6783         size_t alloc_size;
6784         struct net_device *p;
6785
6786         BUG_ON(strlen(name) >= sizeof(dev->name));
6787
6788         if (txqs < 1) {
6789                 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6790                 return NULL;
6791         }
6792
6793 #ifdef CONFIG_SYSFS
6794         if (rxqs < 1) {
6795                 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6796                 return NULL;
6797         }
6798 #endif
6799
6800         alloc_size = sizeof(struct net_device);
6801         if (sizeof_priv) {
6802                 /* ensure 32-byte alignment of private area */
6803                 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6804                 alloc_size += sizeof_priv;
6805         }
6806         /* ensure 32-byte alignment of whole construct */
6807         alloc_size += NETDEV_ALIGN - 1;
6808
6809         p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6810         if (!p)
6811                 p = vzalloc(alloc_size);
6812         if (!p)
6813                 return NULL;
6814
6815         dev = PTR_ALIGN(p, NETDEV_ALIGN);
6816         dev->padded = (char *)dev - (char *)p;
6817
6818         dev->pcpu_refcnt = alloc_percpu(int);
6819         if (!dev->pcpu_refcnt)
6820                 goto free_dev;
6821
6822         if (dev_addr_init(dev))
6823                 goto free_pcpu;
6824
6825         dev_mc_init(dev);
6826         dev_uc_init(dev);
6827
6828         dev_net_set(dev, &init_net);
6829
6830         dev->gso_max_size = GSO_MAX_SIZE;
6831         dev->gso_max_segs = GSO_MAX_SEGS;
6832         dev->gso_min_segs = 0;
6833
6834         INIT_LIST_HEAD(&dev->napi_list);
6835         INIT_LIST_HEAD(&dev->unreg_list);
6836         INIT_LIST_HEAD(&dev->close_list);
6837         INIT_LIST_HEAD(&dev->link_watch_list);
6838         INIT_LIST_HEAD(&dev->adj_list.upper);
6839         INIT_LIST_HEAD(&dev->adj_list.lower);
6840         INIT_LIST_HEAD(&dev->all_adj_list.upper);
6841         INIT_LIST_HEAD(&dev->all_adj_list.lower);
6842         INIT_LIST_HEAD(&dev->ptype_all);
6843         INIT_LIST_HEAD(&dev->ptype_specific);
6844         dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
6845         setup(dev);
6846
6847         dev->num_tx_queues = txqs;
6848         dev->real_num_tx_queues = txqs;
6849         if (netif_alloc_netdev_queues(dev))
6850                 goto free_all;
6851
6852 #ifdef CONFIG_SYSFS
6853         dev->num_rx_queues = rxqs;
6854         dev->real_num_rx_queues = rxqs;
6855         if (netif_alloc_rx_queues(dev))
6856                 goto free_all;
6857 #endif
6858
6859         strcpy(dev->name, name);
6860         dev->name_assign_type = name_assign_type;
6861         dev->group = INIT_NETDEV_GROUP;
6862         if (!dev->ethtool_ops)
6863                 dev->ethtool_ops = &default_ethtool_ops;
6864         return dev;
6865
6866 free_all:
6867         free_netdev(dev);
6868         return NULL;
6869
6870 free_pcpu:
6871         free_percpu(dev->pcpu_refcnt);
6872 free_dev:
6873         netdev_freemem(dev);
6874         return NULL;
6875 }
6876 EXPORT_SYMBOL(alloc_netdev_mqs);
6877
6878 /**
6879  *      free_netdev - free network device
6880  *      @dev: device
6881  *
6882  *      This function does the last stage of destroying an allocated device
6883  *      interface. The reference to the device object is released.
6884  *      If this is the last reference then it will be freed.
6885  */
6886 void free_netdev(struct net_device *dev)
6887 {
6888         struct napi_struct *p, *n;
6889
6890         netif_free_tx_queues(dev);
6891 #ifdef CONFIG_SYSFS
6892         kvfree(dev->_rx);
6893 #endif
6894
6895         kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6896
6897         /* Flush device addresses */
6898         dev_addr_flush(dev);
6899
6900         list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6901                 netif_napi_del(p);
6902
6903         free_percpu(dev->pcpu_refcnt);
6904         dev->pcpu_refcnt = NULL;
6905
6906         /*  Compatibility with error handling in drivers */
6907         if (dev->reg_state == NETREG_UNINITIALIZED) {
6908                 netdev_freemem(dev);
6909                 return;
6910         }
6911
6912         BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6913         dev->reg_state = NETREG_RELEASED;
6914
6915         /* will free via device release */
6916         put_device(&dev->dev);
6917 }
6918 EXPORT_SYMBOL(free_netdev);
6919
6920 /**
6921  *      synchronize_net -  Synchronize with packet receive processing
6922  *
6923  *      Wait for packets currently being received to be done.
6924  *      Does not block later packets from starting.
6925  */
6926 void synchronize_net(void)
6927 {
6928         might_sleep();
6929         if (rtnl_is_locked())
6930                 synchronize_rcu_expedited();
6931         else
6932                 synchronize_rcu();
6933 }
6934 EXPORT_SYMBOL(synchronize_net);
6935
6936 /**
6937  *      unregister_netdevice_queue - remove device from the kernel
6938  *      @dev: device
6939  *      @head: list
6940  *
6941  *      This function shuts down a device interface and removes it
6942  *      from the kernel tables.
6943  *      If head not NULL, device is queued to be unregistered later.
6944  *
6945  *      Callers must hold the rtnl semaphore.  You may want
6946  *      unregister_netdev() instead of this.
6947  */
6948
6949 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6950 {
6951         ASSERT_RTNL();
6952
6953         if (head) {
6954                 list_move_tail(&dev->unreg_list, head);
6955         } else {
6956                 rollback_registered(dev);
6957                 /* Finish processing unregister after unlock */
6958                 net_set_todo(dev);
6959         }
6960 }
6961 EXPORT_SYMBOL(unregister_netdevice_queue);
6962
6963 /**
6964  *      unregister_netdevice_many - unregister many devices
6965  *      @head: list of devices
6966  *
6967  *  Note: As most callers use a stack allocated list_head,
6968  *  we force a list_del() to make sure stack wont be corrupted later.
6969  */
6970 void unregister_netdevice_many(struct list_head *head)
6971 {
6972         struct net_device *dev;
6973
6974         if (!list_empty(head)) {
6975                 rollback_registered_many(head);
6976                 list_for_each_entry(dev, head, unreg_list)
6977                         net_set_todo(dev);
6978                 list_del(head);
6979         }
6980 }
6981 EXPORT_SYMBOL(unregister_netdevice_many);
6982
6983 /**
6984  *      unregister_netdev - remove device from the kernel
6985  *      @dev: device
6986  *
6987  *      This function shuts down a device interface and removes it
6988  *      from the kernel tables.
6989  *
6990  *      This is just a wrapper for unregister_netdevice that takes
6991  *      the rtnl semaphore.  In general you want to use this and not
6992  *      unregister_netdevice.
6993  */
6994 void unregister_netdev(struct net_device *dev)
6995 {
6996         rtnl_lock();
6997         unregister_netdevice(dev);
6998         rtnl_unlock();
6999 }
7000 EXPORT_SYMBOL(unregister_netdev);
7001
7002 /**
7003  *      dev_change_net_namespace - move device to different nethost namespace
7004  *      @dev: device
7005  *      @net: network namespace
7006  *      @pat: If not NULL name pattern to try if the current device name
7007  *            is already taken in the destination network namespace.
7008  *
7009  *      This function shuts down a device interface and moves it
7010  *      to a new network namespace. On success 0 is returned, on
7011  *      a failure a netagive errno code is returned.
7012  *
7013  *      Callers must hold the rtnl semaphore.
7014  */
7015
7016 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7017 {
7018         int err;
7019
7020         ASSERT_RTNL();
7021
7022         /* Don't allow namespace local devices to be moved. */
7023         err = -EINVAL;
7024         if (dev->features & NETIF_F_NETNS_LOCAL)
7025                 goto out;
7026
7027         /* Ensure the device has been registrered */
7028         if (dev->reg_state != NETREG_REGISTERED)
7029                 goto out;
7030
7031         /* Get out if there is nothing todo */
7032         err = 0;
7033         if (net_eq(dev_net(dev), net))
7034                 goto out;
7035
7036         /* Pick the destination device name, and ensure
7037          * we can use it in the destination network namespace.
7038          */
7039         err = -EEXIST;
7040         if (__dev_get_by_name(net, dev->name)) {
7041                 /* We get here if we can't use the current device name */
7042                 if (!pat)
7043                         goto out;
7044                 if (dev_get_valid_name(net, dev, pat) < 0)
7045                         goto out;
7046         }
7047
7048         /*
7049          * And now a mini version of register_netdevice unregister_netdevice.
7050          */
7051
7052         /* If device is running close it first. */
7053         dev_close(dev);
7054
7055         /* And unlink it from device chain */
7056         err = -ENODEV;
7057         unlist_netdevice(dev);
7058
7059         synchronize_net();
7060
7061         /* Shutdown queueing discipline. */
7062         dev_shutdown(dev);
7063
7064         /* Notify protocols, that we are about to destroy
7065            this device. They should clean all the things.
7066
7067            Note that dev->reg_state stays at NETREG_REGISTERED.
7068            This is wanted because this way 8021q and macvlan know
7069            the device is just moving and can keep their slaves up.
7070         */
7071         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7072         rcu_barrier();
7073         call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7074         rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
7075
7076         /*
7077          *      Flush the unicast and multicast chains
7078          */
7079         dev_uc_flush(dev);
7080         dev_mc_flush(dev);
7081
7082         /* Send a netdev-removed uevent to the old namespace */
7083         kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
7084         netdev_adjacent_del_links(dev);
7085
7086         /* Actually switch the network namespace */
7087         dev_net_set(dev, net);
7088
7089         /* If there is an ifindex conflict assign a new one */
7090         if (__dev_get_by_index(net, dev->ifindex))
7091                 dev->ifindex = dev_new_index(net);
7092
7093         /* Send a netdev-add uevent to the new namespace */
7094         kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
7095         netdev_adjacent_add_links(dev);
7096
7097         /* Fixup kobjects */
7098         err = device_rename(&dev->dev, dev->name);
7099         WARN_ON(err);
7100
7101         /* Add the device back in the hashes */
7102         list_netdevice(dev);
7103
7104         /* Notify protocols, that a new device appeared. */
7105         call_netdevice_notifiers(NETDEV_REGISTER, dev);
7106
7107         /*
7108          *      Prevent userspace races by waiting until the network
7109          *      device is fully setup before sending notifications.
7110          */
7111         rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
7112
7113         synchronize_net();
7114         err = 0;
7115 out:
7116         return err;
7117 }
7118 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
7119
7120 static int dev_cpu_callback(struct notifier_block *nfb,
7121                             unsigned long action,
7122                             void *ocpu)
7123 {
7124         struct sk_buff **list_skb;
7125         struct sk_buff *skb;
7126         unsigned int cpu, oldcpu = (unsigned long)ocpu;
7127         struct softnet_data *sd, *oldsd;
7128
7129         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
7130                 return NOTIFY_OK;
7131
7132         local_irq_disable();
7133         cpu = smp_processor_id();
7134         sd = &per_cpu(softnet_data, cpu);
7135         oldsd = &per_cpu(softnet_data, oldcpu);
7136
7137         /* Find end of our completion_queue. */
7138         list_skb = &sd->completion_queue;
7139         while (*list_skb)
7140                 list_skb = &(*list_skb)->next;
7141         /* Append completion queue from offline CPU. */
7142         *list_skb = oldsd->completion_queue;
7143         oldsd->completion_queue = NULL;
7144
7145         /* Append output queue from offline CPU. */
7146         if (oldsd->output_queue) {
7147                 *sd->output_queue_tailp = oldsd->output_queue;
7148                 sd->output_queue_tailp = oldsd->output_queue_tailp;
7149                 oldsd->output_queue = NULL;
7150                 oldsd->output_queue_tailp = &oldsd->output_queue;
7151         }
7152         /* Append NAPI poll list from offline CPU, with one exception :
7153          * process_backlog() must be called by cpu owning percpu backlog.
7154          * We properly handle process_queue & input_pkt_queue later.
7155          */
7156         while (!list_empty(&oldsd->poll_list)) {
7157                 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7158                                                             struct napi_struct,
7159                                                             poll_list);
7160
7161                 list_del_init(&napi->poll_list);
7162                 if (napi->poll == process_backlog)
7163                         napi->state = 0;
7164                 else
7165                         ____napi_schedule(sd, napi);
7166         }
7167
7168         raise_softirq_irqoff(NET_TX_SOFTIRQ);
7169         local_irq_enable();
7170
7171         /* Process offline CPU's input_pkt_queue */
7172         while ((skb = __skb_dequeue(&oldsd->process_queue))) {
7173                 netif_rx_ni(skb);
7174                 input_queue_head_incr(oldsd);
7175         }
7176         while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
7177                 netif_rx_ni(skb);
7178                 input_queue_head_incr(oldsd);
7179         }
7180
7181         return NOTIFY_OK;
7182 }
7183
7184
7185 /**
7186  *      netdev_increment_features - increment feature set by one
7187  *      @all: current feature set
7188  *      @one: new feature set
7189  *      @mask: mask feature set
7190  *
7191  *      Computes a new feature set after adding a device with feature set
7192  *      @one to the master device with current feature set @all.  Will not
7193  *      enable anything that is off in @mask. Returns the new feature set.
7194  */
7195 netdev_features_t netdev_increment_features(netdev_features_t all,
7196         netdev_features_t one, netdev_features_t mask)
7197 {
7198         if (mask & NETIF_F_GEN_CSUM)
7199                 mask |= NETIF_F_ALL_CSUM;
7200         mask |= NETIF_F_VLAN_CHALLENGED;
7201
7202         all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
7203         all &= one | ~NETIF_F_ALL_FOR_ALL;
7204
7205         /* If one device supports hw checksumming, set for all. */
7206         if (all & NETIF_F_GEN_CSUM)
7207                 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
7208
7209         return all;
7210 }
7211 EXPORT_SYMBOL(netdev_increment_features);
7212
7213 static struct hlist_head * __net_init netdev_create_hash(void)
7214 {
7215         int i;
7216         struct hlist_head *hash;
7217
7218         hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7219         if (hash != NULL)
7220                 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7221                         INIT_HLIST_HEAD(&hash[i]);
7222
7223         return hash;
7224 }
7225
7226 /* Initialize per network namespace state */
7227 static int __net_init netdev_init(struct net *net)
7228 {
7229         if (net != &init_net)
7230                 INIT_LIST_HEAD(&net->dev_base_head);
7231
7232         net->dev_name_head = netdev_create_hash();
7233         if (net->dev_name_head == NULL)
7234                 goto err_name;
7235
7236         net->dev_index_head = netdev_create_hash();
7237         if (net->dev_index_head == NULL)
7238                 goto err_idx;
7239
7240         return 0;
7241
7242 err_idx:
7243         kfree(net->dev_name_head);
7244 err_name:
7245         return -ENOMEM;
7246 }
7247
7248 /**
7249  *      netdev_drivername - network driver for the device
7250  *      @dev: network device
7251  *
7252  *      Determine network driver for device.
7253  */
7254 const char *netdev_drivername(const struct net_device *dev)
7255 {
7256         const struct device_driver *driver;
7257         const struct device *parent;
7258         const char *empty = "";
7259
7260         parent = dev->dev.parent;
7261         if (!parent)
7262                 return empty;
7263
7264         driver = parent->driver;
7265         if (driver && driver->name)
7266                 return driver->name;
7267         return empty;
7268 }
7269
7270 static void __netdev_printk(const char *level, const struct net_device *dev,
7271                             struct va_format *vaf)
7272 {
7273         if (dev && dev->dev.parent) {
7274                 dev_printk_emit(level[1] - '0',
7275                                 dev->dev.parent,
7276                                 "%s %s %s%s: %pV",
7277                                 dev_driver_string(dev->dev.parent),
7278                                 dev_name(dev->dev.parent),
7279                                 netdev_name(dev), netdev_reg_state(dev),
7280                                 vaf);
7281         } else if (dev) {
7282                 printk("%s%s%s: %pV",
7283                        level, netdev_name(dev), netdev_reg_state(dev), vaf);
7284         } else {
7285                 printk("%s(NULL net_device): %pV", level, vaf);
7286         }
7287 }
7288
7289 void netdev_printk(const char *level, const struct net_device *dev,
7290                    const char *format, ...)
7291 {
7292         struct va_format vaf;
7293         va_list args;
7294
7295         va_start(args, format);
7296
7297         vaf.fmt = format;
7298         vaf.va = &args;
7299
7300         __netdev_printk(level, dev, &vaf);
7301
7302         va_end(args);
7303 }
7304 EXPORT_SYMBOL(netdev_printk);
7305
7306 #define define_netdev_printk_level(func, level)                 \
7307 void func(const struct net_device *dev, const char *fmt, ...)   \
7308 {                                                               \
7309         struct va_format vaf;                                   \
7310         va_list args;                                           \
7311                                                                 \
7312         va_start(args, fmt);                                    \
7313                                                                 \
7314         vaf.fmt = fmt;                                          \
7315         vaf.va = &args;                                         \
7316                                                                 \
7317         __netdev_printk(level, dev, &vaf);                      \
7318                                                                 \
7319         va_end(args);                                           \
7320 }                                                               \
7321 EXPORT_SYMBOL(func);
7322
7323 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7324 define_netdev_printk_level(netdev_alert, KERN_ALERT);
7325 define_netdev_printk_level(netdev_crit, KERN_CRIT);
7326 define_netdev_printk_level(netdev_err, KERN_ERR);
7327 define_netdev_printk_level(netdev_warn, KERN_WARNING);
7328 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7329 define_netdev_printk_level(netdev_info, KERN_INFO);
7330
7331 static void __net_exit netdev_exit(struct net *net)
7332 {
7333         kfree(net->dev_name_head);
7334         kfree(net->dev_index_head);
7335 }
7336
7337 static struct pernet_operations __net_initdata netdev_net_ops = {
7338         .init = netdev_init,
7339         .exit = netdev_exit,
7340 };
7341
7342 static void __net_exit default_device_exit(struct net *net)
7343 {
7344         struct net_device *dev, *aux;
7345         /*
7346          * Push all migratable network devices back to the
7347          * initial network namespace
7348          */
7349         rtnl_lock();
7350         for_each_netdev_safe(net, dev, aux) {
7351                 int err;
7352                 char fb_name[IFNAMSIZ];
7353
7354                 /* Ignore unmoveable devices (i.e. loopback) */
7355                 if (dev->features & NETIF_F_NETNS_LOCAL)
7356                         continue;
7357
7358                 /* Leave virtual devices for the generic cleanup */
7359                 if (dev->rtnl_link_ops)
7360                         continue;
7361
7362                 /* Push remaining network devices to init_net */
7363                 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7364                 err = dev_change_net_namespace(dev, &init_net, fb_name);
7365                 if (err) {
7366                         pr_emerg("%s: failed to move %s to init_net: %d\n",
7367                                  __func__, dev->name, err);
7368                         BUG();
7369                 }
7370         }
7371         rtnl_unlock();
7372 }
7373
7374 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7375 {
7376         /* Return with the rtnl_lock held when there are no network
7377          * devices unregistering in any network namespace in net_list.
7378          */
7379         struct net *net;
7380         bool unregistering;
7381         DEFINE_WAIT_FUNC(wait, woken_wake_function);
7382
7383         add_wait_queue(&netdev_unregistering_wq, &wait);
7384         for (;;) {
7385                 unregistering = false;
7386                 rtnl_lock();
7387                 list_for_each_entry(net, net_list, exit_list) {
7388                         if (net->dev_unreg_count > 0) {
7389                                 unregistering = true;
7390                                 break;
7391                         }
7392                 }
7393                 if (!unregistering)
7394                         break;
7395                 __rtnl_unlock();
7396
7397                 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
7398         }
7399         remove_wait_queue(&netdev_unregistering_wq, &wait);
7400 }
7401
7402 static void __net_exit default_device_exit_batch(struct list_head *net_list)
7403 {
7404         /* At exit all network devices most be removed from a network
7405          * namespace.  Do this in the reverse order of registration.
7406          * Do this across as many network namespaces as possible to
7407          * improve batching efficiency.
7408          */
7409         struct net_device *dev;
7410         struct net *net;
7411         LIST_HEAD(dev_kill_list);
7412
7413         /* To prevent network device cleanup code from dereferencing
7414          * loopback devices or network devices that have been freed
7415          * wait here for all pending unregistrations to complete,
7416          * before unregistring the loopback device and allowing the
7417          * network namespace be freed.
7418          *
7419          * The netdev todo list containing all network devices
7420          * unregistrations that happen in default_device_exit_batch
7421          * will run in the rtnl_unlock() at the end of
7422          * default_device_exit_batch.
7423          */
7424         rtnl_lock_unregistering(net_list);
7425         list_for_each_entry(net, net_list, exit_list) {
7426                 for_each_netdev_reverse(net, dev) {
7427                         if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
7428                                 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7429                         else
7430                                 unregister_netdevice_queue(dev, &dev_kill_list);
7431                 }
7432         }
7433         unregister_netdevice_many(&dev_kill_list);
7434         rtnl_unlock();
7435 }
7436
7437 static struct pernet_operations __net_initdata default_device_ops = {
7438         .exit = default_device_exit,
7439         .exit_batch = default_device_exit_batch,
7440 };
7441
7442 /*
7443  *      Initialize the DEV module. At boot time this walks the device list and
7444  *      unhooks any devices that fail to initialise (normally hardware not
7445  *      present) and leaves us with a valid list of present and active devices.
7446  *
7447  */
7448
7449 /*
7450  *       This is called single threaded during boot, so no need
7451  *       to take the rtnl semaphore.
7452  */
7453 static int __init net_dev_init(void)
7454 {
7455         int i, rc = -ENOMEM;
7456
7457         BUG_ON(!dev_boot_phase);
7458
7459         if (dev_proc_init())
7460                 goto out;
7461
7462         if (netdev_kobject_init())
7463                 goto out;
7464
7465         INIT_LIST_HEAD(&ptype_all);
7466         for (i = 0; i < PTYPE_HASH_SIZE; i++)
7467                 INIT_LIST_HEAD(&ptype_base[i]);
7468
7469         INIT_LIST_HEAD(&offload_base);
7470
7471         if (register_pernet_subsys(&netdev_net_ops))
7472                 goto out;
7473
7474         /*
7475          *      Initialise the packet receive queues.
7476          */
7477
7478         for_each_possible_cpu(i) {
7479                 struct softnet_data *sd = &per_cpu(softnet_data, i);
7480
7481                 skb_queue_head_init(&sd->input_pkt_queue);
7482                 skb_queue_head_init(&sd->process_queue);
7483                 INIT_LIST_HEAD(&sd->poll_list);
7484                 sd->output_queue_tailp = &sd->output_queue;
7485 #ifdef CONFIG_RPS
7486                 sd->csd.func = rps_trigger_softirq;
7487                 sd->csd.info = sd;
7488                 sd->cpu = i;
7489 #endif
7490
7491                 sd->backlog.poll = process_backlog;
7492                 sd->backlog.weight = weight_p;
7493         }
7494
7495         dev_boot_phase = 0;
7496
7497         /* The loopback device is special if any other network devices
7498          * is present in a network namespace the loopback device must
7499          * be present. Since we now dynamically allocate and free the
7500          * loopback device ensure this invariant is maintained by
7501          * keeping the loopback device as the first device on the
7502          * list of network devices.  Ensuring the loopback devices
7503          * is the first device that appears and the last network device
7504          * that disappears.
7505          */
7506         if (register_pernet_device(&loopback_net_ops))
7507                 goto out;
7508
7509         if (register_pernet_device(&default_device_ops))
7510                 goto out;
7511
7512         open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7513         open_softirq(NET_RX_SOFTIRQ, net_rx_action);
7514
7515         hotcpu_notifier(dev_cpu_callback, 0);
7516         dst_init();
7517         rc = 0;
7518 out:
7519         return rc;
7520 }
7521
7522 subsys_initcall(net_dev_init);