/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

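/*
 * Return the team_port hanging off a port netdevice, or NULL if @dev is
 * not enslaved to a team. Caller must hold rtnl_lock.
 */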
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
        struct team_port *port = rtnl_dereference(dev->rx_handler_data);

        return netif_is_team_port(dev) ? port : NULL;
}

/*
 * Since the ability to change the device address of an open port device is
 * tested in team_port_add, this function can be called without checking the
 * return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
                               const unsigned char *dev_addr)
{
        struct sockaddr_storage addr;

        memcpy(addr.__data, dev_addr, port_dev->addr_len);
        addr.ss_family = port_dev->type;
        return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
        return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
                                       struct team_port *port)
{
        return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
        return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
                                      struct team_port *port)
{
        team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_lower_state_changed(struct team_port *port)
{
        struct netdev_lag_lower_state_info info;

        info.link_up = port->linkup;
        info.tx_enabled = team_port_enabled(port);
        netdev_lower_state_changed(port->dev, &info);
}

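/*
 * Recompute the port's effective link state: the user-supplied value when
 * the user override is enabled, otherwise the state reported by the device.
 * Lower-state watchers are notified only on an actual change.
 */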
static void team_refresh_port_linkup(struct team_port *port)
{
        bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
                                                      port->state.linkup;

        if (port->linkup != new_linkup) {
                port->linkup = new_linkup;
                team_lower_state_changed(port);
        }
}


/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
        struct list_head list;
        struct list_head tmp_list;
        struct team_option *option;
        struct team_option_inst_info info;
        bool changed;
        bool removed;
};

static struct team_option *__team_find_option(struct team *team,
                                              const char *opt_name)
{
        struct team_option *option;

        list_for_each_entry(option, &team->option_list, list) {
                if (strcmp(option->name, opt_name) == 0)
                        return option;
        }
        return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
        list_del(&opt_inst->list);
        kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
                                          struct team_option *option)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option == option)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
                                  struct team_port *port)
{
        struct team_option_inst *opt_inst;
        unsigned int array_size;
        unsigned int i;
        int err;

        array_size = option->array_size;
        if (!array_size)
                array_size = 1; /* No array but still need one instance */

        for (i = 0; i < array_size; i++) {
                opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
                if (!opt_inst)
                        return -ENOMEM;
                opt_inst->option = option;
                opt_inst->info.port = port;
                opt_inst->info.array_index = i;
                opt_inst->changed = true;
                opt_inst->removed = false;
                list_add_tail(&opt_inst->list, &team->option_inst_list);
                if (option->init) {
                        err = option->init(team, &opt_inst->info);
                        if (err)
                                return err;
                }
        }
        return 0;
}

static int __team_option_inst_add_option(struct team *team,
                                         struct team_option *option)
{
        int err;

        if (!option->per_port) {
                err = __team_option_inst_add(team, option, NULL);
                if (err)
                        goto inst_del_option;
        }
        return 0;

inst_del_option:
        __team_option_inst_del_option(team, option);
        return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
                                                   struct team_option *option)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->option == option) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

static void __team_option_inst_del_port(struct team *team,
                                        struct team_port *port)
{
        struct team_option_inst *opt_inst, *tmp;

        list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
                if (opt_inst->option->per_port &&
                    opt_inst->info.port == port)
                        __team_option_inst_del(opt_inst);
        }
}

static int __team_option_inst_add_port(struct team *team,
                                       struct team_port *port)
{
        struct team_option *option;
        int err;

        list_for_each_entry(option, &team->option_list, list) {
                if (!option->per_port)
                        continue;
                err = __team_option_inst_add(team, option, port);
                if (err)
                        goto inst_del_port;
        }
        return 0;

inst_del_port:
        __team_option_inst_del_port(team, port);
        return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
                                                 struct team_port *port)
{
        struct team_option_inst *opt_inst;

        list_for_each_entry(opt_inst, &team->option_inst_list, list) {
                if (opt_inst->info.port == port) {
                        opt_inst->changed = true;
                        opt_inst->removed = true;
                }
        }
}

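/*
 * Register @option_count option templates: duplicate each template, create
 * its instances, and link it into team->option_list. On failure, everything
 * done so far is rolled back and the error is returned.
 */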
static int __team_options_register(struct team *team,
                                   const struct team_option *option,
                                   size_t option_count)
{
        int i;
        struct team_option **dst_opts;
        int err;

        dst_opts = kcalloc(option_count, sizeof(struct team_option *),
                           GFP_KERNEL);
        if (!dst_opts)
                return -ENOMEM;
        for (i = 0; i < option_count; i++, option++) {
                if (__team_find_option(team, option->name)) {
                        err = -EEXIST;
                        goto alloc_rollback;
                }
                dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
                if (!dst_opts[i]) {
                        err = -ENOMEM;
                        goto alloc_rollback;
                }
        }

        for (i = 0; i < option_count; i++) {
                err = __team_option_inst_add_option(team, dst_opts[i]);
                if (err)
                        goto inst_rollback;
                list_add_tail(&dst_opts[i]->list, &team->option_list);
        }

        kfree(dst_opts);
        return 0;

inst_rollback:
        for (i--; i >= 0; i--)
                __team_option_inst_del_option(team, dst_opts[i]);

        i = option_count - 1;
alloc_rollback:
        for (i--; i >= 0; i--)
                kfree(dst_opts[i]);

        kfree(dst_opts);
        return err;
}

static void __team_options_mark_removed(struct team *team,
                                        const struct team_option *option,
                                        size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt)
                        __team_option_inst_mark_removed_option(team, del_opt);
        }
}

static void __team_options_unregister(struct team *team,
                                      const struct team_option *option,
                                      size_t option_count)
{
        int i;

        for (i = 0; i < option_count; i++, option++) {
                struct team_option *del_opt;

                del_opt = __team_find_option(team, option->name);
                if (del_opt) {
                        __team_option_inst_del_option(team, del_opt);
                        list_del(&del_opt->list);
                        kfree(del_opt);
                }
        }
}

static void __team_options_change_check(struct team *team);

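/*
 * Example (illustrative sketch only, not part of this file): a mode that
 * exposes a private per-team u32 option would typically register it from
 * its init op. The foo_* names below are hypothetical:
 *
 *      static int foo_fallback_get(struct team *team,
 *                                  struct team_gsetter_ctx *ctx)
 *      {
 *              ctx->data.u32_val = 0;  // would read mode-private state
 *              return 0;
 *      }
 *
 *      static const struct team_option foo_options[] = {
 *              {
 *                      .name   = "fallback",
 *                      .type   = TEAM_OPTION_TYPE_U32,
 *                      .getter = foo_fallback_get,
 *              },
 *      };
 *
 *      err = team_options_register(team, foo_options,
 *                                  ARRAY_SIZE(foo_options));
 */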
int team_options_register(struct team *team,
                          const struct team_option *option,
                          size_t option_count)
{
        int err;

        err = __team_options_register(team, option, option_count);
        if (err)
                return err;
        __team_options_change_check(team);
        return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
                             const struct team_option *option,
                             size_t option_count)
{
        __team_options_mark_removed(team, option, option_count);
        __team_options_change_check(team);
        __team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->getter)
                return -EOPNOTSUPP;
        return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
                           struct team_option_inst *opt_inst,
                           struct team_gsetter_ctx *ctx)
{
        if (!opt_inst->option->setter)
                return -EOPNOTSUPP;
        return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
        struct team_option_inst *opt_inst;

        opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
        opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
        __team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
        struct list_head list;
        const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
        struct team_mode_item *mitem;

        list_for_each_entry(mitem, &mode_list, list) {
                if (strcmp(mitem->mode->kind, kind) == 0)
                        return mitem;
        }
        return NULL;
}

static bool is_good_mode_name(const char *name)
{
        while (*name != '\0') {
                if (!isalpha(*name) && !isdigit(*name) && *name != '_')
                        return false;
                name++;
        }
        return true;
}

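/*
 * Example (illustrative sketch only, not part of this file): a mode module
 * embeds its kind and ops in a static struct team_mode and registers it on
 * load. The "foo" names are hypothetical:
 *
 *      static const struct team_mode foo_mode = {
 *              .kind           = "foo",
 *              .owner          = THIS_MODULE,
 *              .ops            = &foo_mode_ops,
 *      };
 *
 *      static int __init foo_module_init(void)
 *      {
 *              return team_mode_register(&foo_mode);
 *      }
 *
 * Naming the module "team-mode-foo" lets team_mode_get() below autoload it
 * via request_module() when userspace asks for that mode.
 */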
int team_mode_register(const struct team_mode *mode)
{
        int err = 0;
        struct team_mode_item *mitem;

        if (!is_good_mode_name(mode->kind) ||
            mode->priv_size > TEAM_MODE_PRIV_SIZE)
                return -EINVAL;

        mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
        if (!mitem)
                return -ENOMEM;

        spin_lock(&mode_list_lock);
        if (__find_mode(mode->kind)) {
                err = -EEXIST;
                kfree(mitem);
                goto unlock;
        }
        mitem->mode = mode;
        list_add_tail(&mitem->list, &mode_list);
unlock:
        spin_unlock(&mode_list_lock);
        return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
        struct team_mode_item *mitem;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(mode->kind);
        if (mitem) {
                list_del_init(&mitem->list);
                kfree(mitem);
        }
        spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);

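/*
 * Look up a mode by kind and take a reference on its owner module. If the
 * mode is not registered yet, try to autoload "team-mode-<kind>" and look
 * again.
 */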
static const struct team_mode *team_mode_get(const char *kind)
{
        struct team_mode_item *mitem;
        const struct team_mode *mode = NULL;

        spin_lock(&mode_list_lock);
        mitem = __find_mode(kind);
        if (!mitem) {
                spin_unlock(&mode_list_lock);
                request_module("team-mode-%s", kind);
                spin_lock(&mode_list_lock);
                mitem = __find_mode(kind);
        }
        if (mitem) {
                mode = mitem->mode;
                if (!try_module_get(mode->owner))
                        mode = NULL;
        }

        spin_unlock(&mode_list_lock);
        return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
        module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
        return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
                                              struct team_port *port,
                                              struct sk_buff *skb)
{
        return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
        .kind           = "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
        return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
        team->user_carrier_enabled = false;
        team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
        /*
         * To avoid checks in rx/tx skb paths, ensure here that non-null and
         * correct ops are always set.
         */

        if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->transmit)
                team->ops.transmit = team_dummy_transmit;
        else
                team->ops.transmit = team->mode->ops->transmit;

        if (!team->en_port_count || !team_is_mode_set(team) ||
            !team->mode->ops->receive)
                team->ops.receive = team_dummy_receive;
        else
                team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so there's
 * no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
                              const struct team_mode *new_mode)
{
        /* Check if mode was previously set and do cleanup if so */
        if (team_is_mode_set(team)) {
                void (*exit_op)(struct team *team) = team->ops.exit;

                /* Clear ops area so no callback is called any longer */
                memset(&team->ops, 0, sizeof(struct team_mode_ops));
                team_adjust_ops(team);

                if (exit_op)
                        exit_op(team);
                team_mode_put(team->mode);
                team_set_no_mode(team);
                /* zero private data area */
                memset(&team->mode_priv, 0,
                       sizeof(struct team) - offsetof(struct team, mode_priv));
        }

        if (!new_mode)
                return 0;

        if (new_mode->ops->init) {
                int err;

                err = new_mode->ops->init(team);
                if (err)
                        return err;
        }

        team->mode = new_mode;
        memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
        team_adjust_ops(team);

        return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
        const struct team_mode *new_mode;
        struct net_device *dev = team->dev;
        int err;

        if (!list_empty(&team->port_list)) {
                netdev_err(dev, "No ports can be present during mode change\n");
                return -EBUSY;
        }

        if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
                netdev_err(dev, "Unable to change to the same mode the team is in\n");
                return -EINVAL;
        }

        new_mode = team_mode_get(kind);
        if (!new_mode) {
                netdev_err(dev, "Mode \"%s\" not found\n", kind);
                return -EINVAL;
        }

        err = __team_change_mode(team, new_mode);
        if (err) {
                netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
                team_mode_put(new_mode);
                return err;
        }

        netdev_info(dev, "Mode changed to \"%s\"\n", kind);
        return 0;
}


/*********************
 * Peers notification
 *********************/

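/*
 * Deferred work that emits NETDEV_NOTIFY_PEERS. It only trylocks rtnl and
 * reschedules itself on contention, so it cannot deadlock against the sync
 * cancel in team_notify_peers_fini(); count_pending tracks how many
 * notifications are still owed.
 */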
static void team_notify_peers_work(struct work_struct *work)
{
        struct team *team;
        int val;

        team = container_of(work, struct team, notify_peers.dw.work);

        if (!rtnl_trylock()) {
                schedule_delayed_work(&team->notify_peers.dw, 0);
                return;
        }
        val = atomic_dec_if_positive(&team->notify_peers.count_pending);
        if (val < 0) {
                rtnl_unlock();
                return;
        }
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
        rtnl_unlock();
        if (val)
                schedule_delayed_work(&team->notify_peers.dw,
                                      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
        if (!team->notify_peers.count || !netif_running(team->dev))
                return;
        atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
        schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
        INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
        cancel_delayed_work_sync(&team->notify_peers.dw);
}


/*******************************
 * Send multicast group rejoins
 *******************************/

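/*
 * Same deferral scheme as team_notify_peers_work(), but emits
 * NETDEV_RESEND_IGMP so the ports rejoin their multicast groups.
 */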
static void team_mcast_rejoin_work(struct work_struct *work)
{
        struct team *team;
        int val;

        team = container_of(work, struct team, mcast_rejoin.dw.work);

        if (!rtnl_trylock()) {
                schedule_delayed_work(&team->mcast_rejoin.dw, 0);
                return;
        }
        val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
        if (val < 0) {
                rtnl_unlock();
                return;
        }
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
        rtnl_unlock();
        if (val)
                schedule_delayed_work(&team->mcast_rejoin.dw,
                                      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
        if (!team->mcast_rejoin.count || !netif_running(team->dev))
                return;
        atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
        schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
        INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
        cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}


/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct team_port *port;
        struct team *team;
        rx_handler_result_t res;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return RX_HANDLER_CONSUMED;

        *pskb = skb;

        port = team_port_get_rcu(skb->dev);
        team = port->team;
        if (!team_port_enabled(port)) {
                /* allow exact match delivery for disabled ports */
                res = RX_HANDLER_EXACT;
        } else {
                res = team->ops.receive(team, port, skb);
        }
        if (res == RX_HANDLER_ANOTHER) {
                struct team_pcpu_stats *pcpu_stats;

                pcpu_stats = this_cpu_ptr(team->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->rx_packets++;
                pcpu_stats->rx_bytes += skb->len;
                if (skb->pkt_type == PACKET_MULTICAST)
                        pcpu_stats->rx_multicast++;
                u64_stats_update_end(&pcpu_stats->syncp);

                skb->dev = team->dev;
        } else if (res == RX_HANDLER_EXACT) {
                this_cpu_inc(team->pcpu_stats->rx_nohandler);
        } else {
                this_cpu_inc(team->pcpu_stats->rx_dropped);
        }

        return res;
}


/*************************************
 * Multiqueue Tx port select override
 *************************************/

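/*
 * Allocate one override list per tx queue except queue 0, which is never
 * overridden; __team_get_qom_list() below maps queue_id N to slot N - 1.
 */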
static int team_queue_override_init(struct team *team)
{
        struct list_head *listarr;
        unsigned int queue_cnt = team->dev->num_tx_queues - 1;
        unsigned int i;

        if (!queue_cnt)
                return 0;
        listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
                                GFP_KERNEL);
        if (!listarr)
                return -ENOMEM;
        team->qom_lists = listarr;
        for (i = 0; i < queue_cnt; i++)
                INIT_LIST_HEAD(listarr + i);
        return 0;
}

static void team_queue_override_fini(struct team *team)
{
        kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
        return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
        struct list_head *qom_list;
        struct team_port *port;

        if (!team->queue_override_enabled || !skb->queue_mapping)
                return false;
        qom_list = __team_get_qom_list(team, skb->queue_mapping);
        list_for_each_entry_rcu(port, qom_list, qom_list) {
                if (!team_dev_queue_xmit(team, port, skb))
                        return true;
        }
        return false;
}

static void __team_queue_override_port_del(struct team *team,
                                           struct team_port *port)
{
        if (!port->queue_id)
                return;
        list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
                                                      struct team_port *cur)
{
        if (port->priority < cur->priority)
                return true;
        if (port->priority > cur->priority)
                return false;
        if (port->index < cur->index)
                return true;
        return false;
}

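/*
 * Insert the port into its queue's override list, keeping the list ordered
 * by team_queue_override_port_has_gt_prio_than() so that preferred ports
 * are tried first by team_queue_override_transmit().
 */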
static void __team_queue_override_port_add(struct team *team,
                                           struct team_port *port)
{
        struct team_port *cur;
        struct list_head *qom_list;
        struct list_head *node;

        if (!port->queue_id)
                return;
        qom_list = __team_get_qom_list(team, port->queue_id);
        node = qom_list;
        list_for_each_entry(cur, qom_list, qom_list) {
                if (team_queue_override_port_has_gt_prio_than(port, cur))
                        break;
                node = &cur->qom_list;
        }
        list_add_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
        struct team_port *port;
        bool enabled = false;

        list_for_each_entry(port, &team->port_list, list) {
                if (port->queue_id) {
                        enabled = true;
                        break;
                }
        }
        if (enabled == team->queue_override_enabled)
                return;
        netdev_dbg(team->dev, "%s queue override\n",
                   enabled ? "Enabling" : "Disabling");
        team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
                                                  struct team_port *port)
{
        if (!port->queue_id || !team_port_enabled(port))
                return;
        __team_queue_override_port_del(team, port);
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
                                                     struct team_port *port,
                                                     u16 new_queue_id)
{
        if (team_port_enabled(port)) {
                __team_queue_override_port_del(team, port);
                port->queue_id = new_queue_id;
                __team_queue_override_port_add(team, port);
                __team_queue_override_enabled_check(team);
        } else {
                port->queue_id = new_queue_id;
        }
}

static void team_queue_override_port_add(struct team *team,
                                         struct team_port *port)
{
        __team_queue_override_port_add(team, port);
        __team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
                                         struct team_port *port)
{
        __team_queue_override_port_del(team, port);
        __team_queue_override_enabled_check(team);
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
                           const struct team_port *port)
{
        struct team_port *cur;

        list_for_each_entry(cur, &team->port_list, list)
                if (cur == port)
                        return true;
        return false;
}

/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing an in-flight packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
                             struct team_port *port)
{
        if (team_port_enabled(port))
                return;
        port->index = team->en_port_count++;
        hlist_add_head_rcu(&port->hlist,
                           team_port_index_hash(team, port->index));
        team_adjust_ops(team);
        team_queue_override_port_add(team, port);
        if (team->ops.port_enabled)
                team->ops.port_enabled(team, port);
        team_notify_peers(team);
        team_mcast_rejoin(team);
        team_lower_state_changed(port);
}

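/*
 * After removing the port at rm_index, shift every following enabled port
 * down by one so indexes stay dense and the hash buckets match port->index.
 */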
static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
        int i;
        struct team_port *port;

        for (i = rm_index + 1; i < team->en_port_count; i++) {
                port = team_get_port_by_index(team, i);
                hlist_del_rcu(&port->hlist);
                port->index--;
                hlist_add_head_rcu(&port->hlist,
                                   team_port_index_hash(team, port->index));
        }
}

static void team_port_disable(struct team *team,
                              struct team_port *port)
{
        if (!team_port_enabled(port))
                return;
        if (team->ops.port_disabled)
                team->ops.port_disabled(team, port);
        hlist_del_rcu(&port->hlist);
        __reconstruct_port_hlist(team, port->index);
        port->index = -1;
        team->en_port_count--;
        team_queue_override_port_del(team, port);
        team_adjust_ops(team);
        team_lower_state_changed(port);
}

#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
                            NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
                            NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES       (NETIF_F_HW_CSUM | NETIF_F_SG | \
                                 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

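/*
 * Recompute the team device's feature set from its ports: vlan and hw_enc
 * features are folded in per port via netdev_increment_features(), and
 * hard_header_len is raised to the largest value among the ports.
 */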
static void __team_compute_features(struct team *team)
{
        struct team_port *port;
        netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
                                          NETIF_F_ALL_FOR_ALL;
        netdev_features_t enc_features  = TEAM_ENC_FEATURES;
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
                                        IFF_XMIT_DST_RELEASE_PERM;

        list_for_each_entry(port, &team->port_list, list) {
                vlan_features = netdev_increment_features(vlan_features,
                                        port->dev->vlan_features,
                                        TEAM_VLAN_FEATURES);
                enc_features =
                        netdev_increment_features(enc_features,
                                                  port->dev->hw_enc_features,
                                                  TEAM_ENC_FEATURES);

                dst_release_flag &= port->dev->priv_flags;
                if (port->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = port->dev->hard_header_len;
        }

        team->dev->vlan_features = vlan_features;
        team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
                                     NETIF_F_GSO_UDP_L4;
        team->dev->hard_header_len = max_hard_header_len;

        team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
                team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void team_compute_features(struct team *team)
{
        mutex_lock(&team->lock);
        __team_compute_features(team);
        mutex_unlock(&team->lock);
        netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
        int err = 0;

        dev_hold(team->dev);
        if (team->ops.port_enter) {
                err = team->ops.port_enter(team, port);
                if (err) {
                        netdev_err(team->dev, "Device %s failed to enter team mode\n",
                                   port->dev->name);
                        goto err_port_enter;
                }
        }

        return 0;

err_port_enter:
        dev_put(team->dev);

        return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
        if (team->ops.port_leave)
                team->ops.port_leave(team, port);
        dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
        struct netpoll *np;
        int err;

        np = kzalloc(sizeof(*np), GFP_KERNEL);
        if (!np)
                return -ENOMEM;

        err = __netpoll_setup(np, port->dev);
        if (err) {
                kfree(np);
                return err;
        }
        port->np = np;
        return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
        if (!port->team->dev->npinfo)
                return 0;

        return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
        struct netpoll *np = port->np;

        if (!np)
                return;
        port->np = NULL;

        __netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
        return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static int team_upper_dev_link(struct team *team, struct team_port *port,
                               struct netlink_ext_ack *extack)
{
        struct netdev_lag_upper_info lag_upper_info;
        int err;

        lag_upper_info.tx_type = team->mode->lag_tx_type;
        lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
        err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
                                           &lag_upper_info, extack);
        if (err)
                return err;
        port->dev->priv_flags |= IFF_TEAM_PORT;
        return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
        netdev_upper_dev_unlink(port->dev, team->dev);
        port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
                                      struct net_device *port_dev);

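/*
 * Enslave port_dev to the team: after the sanity checks, the port inherits
 * the team's MTU, is opened, and has the team's VLANs, netpoll state,
 * promiscuity/allmulti counts and address lists propagated to it; then the
 * rx_handler and upper link are wired up and the port is enabled. Every
 * step is unwound on failure.
 */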
static int team_port_add(struct team *team, struct net_device *port_dev,
                         struct netlink_ext_ack *extack)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;
        int err;

        if (port_dev->flags & IFF_LOOPBACK) {
                NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
                netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
                           portname);
                return -EINVAL;
        }

        if (netif_is_team_port(port_dev)) {
                NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
                netdev_err(dev, "Device %s is already a port "
                                "of a team device\n", portname);
                return -EBUSY;
        }

        if (dev == port_dev) {
                NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
                netdev_err(dev, "Cannot enslave team device to itself\n");
                return -EINVAL;
        }

        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
                netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
                           portname);
                return -EPERM;
        }

        err = team_dev_type_check_change(dev, port_dev);
        if (err)
                return err;

        if (port_dev->flags & IFF_UP) {
                NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
                netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
                           portname);
                return -EBUSY;
        }

        port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
                       GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        port->dev = port_dev;
        port->team = team;
        INIT_LIST_HEAD(&port->qom_list);

        port->orig.mtu = port_dev->mtu;
        err = dev_set_mtu(port_dev, dev->mtu);
        if (err) {
                netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
                goto err_set_mtu;
        }

        memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

        err = team_port_enter(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to enter team mode\n",
                           portname);
                goto err_port_enter;
        }

        err = dev_open(port_dev, extack);
        if (err) {
                netdev_dbg(dev, "Device %s opening failed\n",
                           portname);
                goto err_dev_open;
        }

        err = vlan_vids_add_by_dev(port_dev, dev);
        if (err) {
                netdev_err(dev, "Failed to add vlan ids to device %s\n",
                                portname);
                goto err_vids_add;
        }

        err = team_port_enable_netpoll(port);
        if (err) {
                netdev_err(dev, "Failed to enable netpoll on device %s\n",
                           portname);
                goto err_enable_netpoll;
        }

        if (!(dev->features & NETIF_F_LRO))
                dev_disable_lro(port_dev);

        err = netdev_rx_handler_register(port_dev, team_handle_frame,
                                         port);
        if (err) {
                netdev_err(dev, "Device %s failed to register rx_handler\n",
                           portname);
                goto err_handler_register;
        }

        err = team_upper_dev_link(team, port, extack);
        if (err) {
                netdev_err(dev, "Device %s failed to set upper link\n",
                           portname);
                goto err_set_upper_link;
        }

        err = __team_option_inst_add_port(team, port);
        if (err) {
                netdev_err(dev, "Device %s failed to add per-port options\n",
                           portname);
                goto err_option_port_add;
        }

        /* set promiscuity level to new slave */
        if (dev->flags & IFF_PROMISC) {
                err = dev_set_promiscuity(port_dev, 1);
                if (err)
                        goto err_set_slave_promisc;
        }

        /* set allmulti level to new slave */
        if (dev->flags & IFF_ALLMULTI) {
                err = dev_set_allmulti(port_dev, 1);
                if (err) {
                        if (dev->flags & IFF_PROMISC)
                                dev_set_promiscuity(port_dev, -1);
                        goto err_set_slave_promisc;
                }
        }

        netif_addr_lock_bh(dev);
        dev_uc_sync_multiple(port_dev, dev);
        dev_mc_sync_multiple(port_dev, dev);
        netif_addr_unlock_bh(dev);

        port->index = -1;
        list_add_tail_rcu(&port->list, &team->port_list);
        team_port_enable(team, port);
        __team_compute_features(team);
        __team_port_change_port_added(port, !!netif_oper_up(port_dev));
        __team_options_change_check(team);

        netdev_info(dev, "Port device %s added\n", portname);

        return 0;

err_set_slave_promisc:
        __team_option_inst_del_port(team, port);

err_option_port_add:
        team_upper_dev_unlink(team, port);

err_set_upper_link:
        netdev_rx_handler_unregister(port_dev);

err_handler_register:
        team_port_disable_netpoll(port);

err_enable_netpoll:
        vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
        dev_close(port_dev);

err_dev_open:
        team_port_leave(team, port);
        team_port_set_orig_dev_addr(port);

err_port_enter:
        dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
        kfree(port);

        return err;
}

static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
        struct net_device *dev = team->dev;
        struct team_port *port;
        char *portname = port_dev->name;

        port = team_port_get_rtnl(port_dev);
        if (!port || !team_port_find(team, port)) {
                netdev_err(dev, "Device %s does not act as a port of this team\n",
                           portname);
                return -ENOENT;
        }

        team_port_disable(team, port);
        list_del_rcu(&port->list);

        if (dev->flags & IFF_PROMISC)
                dev_set_promiscuity(port_dev, -1);
        if (dev->flags & IFF_ALLMULTI)
                dev_set_allmulti(port_dev, -1);

        team_upper_dev_unlink(team, port);
        netdev_rx_handler_unregister(port_dev);
        team_port_disable_netpoll(port);
        vlan_vids_del_by_dev(port_dev, dev);
        dev_uc_unsync(port_dev, dev);
        dev_mc_unsync(port_dev, dev);
        dev_close(port_dev);
        team_port_leave(team, port);

        __team_option_inst_mark_removed_port(team, port);
        __team_options_change_check(team);
        __team_option_inst_del_port(team, port);
        __team_port_change_port_removed(port);

        team_port_set_orig_dev_addr(port);
        dev_set_mtu(port_dev, port->orig.mtu);
        kfree_rcu(port, rcu);
        netdev_info(dev, "Port device %s removed\n", portname);
        __team_compute_features(team);

        return 0;
}


/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
        ctx->data.str_val = team->mode->kind;
        return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
        return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->notify_peers.count;
        return 0;
}

static int team_notify_peers_count_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        team->notify_peers.count = ctx->data.u32_val;
        return 0;
}

static int team_notify_peers_interval_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->notify_peers.interval;
        return 0;
}

static int team_notify_peers_interval_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        team->notify_peers.interval = ctx->data.u32_val;
        return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->mcast_rejoin.count;
        return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        team->mcast_rejoin.count = ctx->data.u32_val;
        return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        ctx->data.u32_val = team->mcast_rejoin.interval;
        return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        team->mcast_rejoin.interval = ctx->data.u32_val;
        return 0;
}

static int team_port_en_option_get(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = team_port_enabled(port);
        return 0;
}

static int team_port_en_option_set(struct team *team,
                                   struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        if (ctx->data.bool_val)
                team_port_enable(team, port);
        else
                team_port_disable(team, port);
        return 0;
}

static int team_user_linkup_option_get(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup;
        return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
                                       struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        __team_carrier_check(port->team);
        return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.bool_val = port->user.linkup_enabled;
        return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
                                          struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        port->user.linkup_enabled = ctx->data.bool_val;
        team_refresh_port_linkup(port);
        __team_carrier_check(port->team);
        return 0;
}

static int team_priority_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.s32_val = port->priority;
        return 0;
}

static int team_priority_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;
        s32 priority = ctx->data.s32_val;

        if (port->priority == priority)
                return 0;
        port->priority = priority;
        team_queue_override_port_prio_changed(team, port);
        return 0;
}

static int team_queue_id_option_get(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;

        ctx->data.u32_val = port->queue_id;
        return 0;
}

static int team_queue_id_option_set(struct team *team,
                                    struct team_gsetter_ctx *ctx)
{
        struct team_port *port = ctx->info->port;
        u16 new_queue_id = ctx->data.u32_val;

        if (port->queue_id == new_queue_id)
                return 0;
        if (new_queue_id >= team->dev->real_num_tx_queues)
                return -EINVAL;
        team_queue_override_port_change_queue_id(team, port, new_queue_id);
        return 0;
}

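/*
 * Core options every team device exposes; modes register their own
 * additional options via team_options_register().
 */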
static const struct team_option team_options[] = {
        {
                .name = "mode",
                .type = TEAM_OPTION_TYPE_STRING,
                .getter = team_mode_option_get,
                .setter = team_mode_option_set,
        },
        {
                .name = "notify_peers_count",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_notify_peers_count_get,
                .setter = team_notify_peers_count_set,
        },
        {
                .name = "notify_peers_interval",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_notify_peers_interval_get,
                .setter = team_notify_peers_interval_set,
        },
        {
                .name = "mcast_rejoin_count",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_mcast_rejoin_count_get,
                .setter = team_mcast_rejoin_count_set,
        },
        {
                .name = "mcast_rejoin_interval",
                .type = TEAM_OPTION_TYPE_U32,
                .getter = team_mcast_rejoin_interval_get,
                .setter = team_mcast_rejoin_interval_set,
        },
        {
                .name = "enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_port_en_option_get,
                .setter = team_port_en_option_set,
        },
        {
                .name = "user_linkup",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_option_get,
                .setter = team_user_linkup_option_set,
        },
        {
                .name = "user_linkup_enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
                .per_port = true,
                .getter = team_user_linkup_en_option_get,
                .setter = team_user_linkup_en_option_set,
        },
        {
                .name = "priority",
                .type = TEAM_OPTION_TYPE_S32,
                .per_port = true,
                .getter = team_priority_option_get,
                .setter = team_priority_option_set,
        },
        {
                .name = "queue_id",
                .type = TEAM_OPTION_TYPE_U32,
                .per_port = true,
                .getter = team_queue_id_option_get,
                .setter = team_queue_id_option_set,
        },
};


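/*
 * ndo_init callback: set up per-cpu stats, the enabled-port hash, the port
 * and option lists, and register the core options above. Called under
 * rtnl_lock from register_netdevice().
 */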
1606 static int team_init(struct net_device *dev)
1607 {
1608         struct team *team = netdev_priv(dev);
1609         int i;
1610         int err;
1611
1612         team->dev = dev;
1613         mutex_init(&team->lock);
1614         team_set_no_mode(team);
1615
1616         team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
1617         if (!team->pcpu_stats)
1618                 return -ENOMEM;
1619
1620         for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1621                 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1622         INIT_LIST_HEAD(&team->port_list);
1623         err = team_queue_override_init(team);
1624         if (err)
1625                 goto err_team_queue_override_init;
1626
1627         team_adjust_ops(team);
1628
1629         INIT_LIST_HEAD(&team->option_list);
1630         INIT_LIST_HEAD(&team->option_inst_list);
1631
1632         team_notify_peers_init(team);
1633         team_mcast_rejoin_init(team);
1634
1635         err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1636         if (err)
1637                 goto err_options_register;
1638         netif_carrier_off(dev);
1639
1640         netdev_lockdep_set_classes(dev);
1641
1642         return 0;
1643
1644 err_options_register:
1645         team_mcast_rejoin_fini(team);
1646         team_notify_peers_fini(team);
1647         team_queue_override_fini(team);
1648 err_team_queue_override_init:
1649         free_percpu(team->pcpu_stats);
1650
1651         return err;
1652 }
1653
1654 static void team_uninit(struct net_device *dev)
1655 {
1656         struct team *team = netdev_priv(dev);
1657         struct team_port *port;
1658         struct team_port *tmp;
1659
1660         mutex_lock(&team->lock);
1661         list_for_each_entry_safe(port, tmp, &team->port_list, list)
1662                 team_port_del(team, port->dev);
1663
1664         __team_change_mode(team, NULL); /* cleanup */
1665         __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1666         team_mcast_rejoin_fini(team);
1667         team_notify_peers_fini(team);
1668         team_queue_override_fini(team);
1669         mutex_unlock(&team->lock);
1670         netdev_change_features(dev);
1671 }
1672
1673 static void team_destructor(struct net_device *dev)
1674 {
1675         struct team *team = netdev_priv(dev);
1676
1677         free_percpu(team->pcpu_stats);
1678 }
1679
1680 static int team_open(struct net_device *dev)
1681 {
1682         return 0;
1683 }
1684
1685 static int team_close(struct net_device *dev)
1686 {
1687         return 0;
1688 }
1689
1690 /*
1691  * note: already called with rcu_read_lock held
1692  */
1693 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1694 {
1695         struct team *team = netdev_priv(dev);
1696         bool tx_success;
1697         unsigned int len = skb->len;
1698
1699         tx_success = team_queue_override_transmit(team, skb);
1700         if (!tx_success)
1701                 tx_success = team->ops.transmit(team, skb);
1702         if (tx_success) {
1703                 struct team_pcpu_stats *pcpu_stats;
1704
1705                 pcpu_stats = this_cpu_ptr(team->pcpu_stats);
1706                 u64_stats_update_begin(&pcpu_stats->syncp);
1707                 pcpu_stats->tx_packets++;
1708                 pcpu_stats->tx_bytes += len;
1709                 u64_stats_update_end(&pcpu_stats->syncp);
1710         } else {
1711                 this_cpu_inc(team->pcpu_stats->tx_dropped);
1712         }
1713
1714         return NETDEV_TX_OK;
1715 }
1716
1717 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1718                              struct net_device *sb_dev)
1719 {
1720         /*
1721          * This helper function exists to help dev_pick_tx get the correct
1722          * destination queue.  Using a helper function skips a call to
1723          * skb_tx_hash and will put the skbs in the queue we expect on their
1724          * way down to the team driver.
1725          */
1726         u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
1727
1728         /*
1729          * Save the original txq to restore before passing to the driver
1730          */
1731         qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
1732
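        /*
         * The loop below just reduces txq modulo dev->real_num_tx_queues;
         * it is written as repeated subtraction because txq rarely exceeds
         * the real queue count by more than one multiple.
         */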
1733         if (unlikely(txq >= dev->real_num_tx_queues)) {
1734                 do {
1735                         txq -= dev->real_num_tx_queues;
1736                 } while (txq >= dev->real_num_tx_queues);
1737         }
1738         return txq;
1739 }
1740
1741 static void team_change_rx_flags(struct net_device *dev, int change)
1742 {
1743         struct team *team = netdev_priv(dev);
1744         struct team_port *port;
1745         int inc;
1746
1747         rcu_read_lock();
1748         list_for_each_entry_rcu(port, &team->port_list, list) {
1749                 if (change & IFF_PROMISC) {
1750                         inc = dev->flags & IFF_PROMISC ? 1 : -1;
1751                         dev_set_promiscuity(port->dev, inc);
1752                 }
1753                 if (change & IFF_ALLMULTI) {
1754                         inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1755                         dev_set_allmulti(port->dev, inc);
1756                 }
1757         }
1758         rcu_read_unlock();
1759 }
1760
1761 static void team_set_rx_mode(struct net_device *dev)
1762 {
1763         struct team *team = netdev_priv(dev);
1764         struct team_port *port;
1765
1766         rcu_read_lock();
1767         list_for_each_entry_rcu(port, &team->port_list, list) {
1768                 dev_uc_sync_multiple(port->dev, dev);
1769                 dev_mc_sync_multiple(port->dev, dev);
1770         }
1771         rcu_read_unlock();
1772 }
1773
1774 static int team_set_mac_address(struct net_device *dev, void *p)
1775 {
1776         struct sockaddr *addr = p;
1777         struct team *team = netdev_priv(dev);
1778         struct team_port *port;
1779
1780         if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1781                 return -EADDRNOTAVAIL;
1782         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1783         mutex_lock(&team->lock);
1784         list_for_each_entry(port, &team->port_list, list)
1785                 if (team->ops.port_change_dev_addr)
1786                         team->ops.port_change_dev_addr(team, port);
1787         mutex_unlock(&team->lock);
1788         return 0;
1789 }
1790
1791 static int team_change_mtu(struct net_device *dev, int new_mtu)
1792 {
1793         struct team *team = netdev_priv(dev);
1794         struct team_port *port;
1795         int err;
1796
1797         /*
1798          * Although this is a reader, it's guarded by the team lock. It's not
1799          * possible to traverse the list in reverse under rcu_read_lock.
1800          */
1801         mutex_lock(&team->lock);
1802         team->port_mtu_change_allowed = true;
1803         list_for_each_entry(port, &team->port_list, list) {
1804                 err = dev_set_mtu(port->dev, new_mtu);
1805                 if (err) {
1806                         netdev_err(dev, "Device %s failed to change mtu\n",
1807                                    port->dev->name);
1808                         goto unwind;
1809                 }
1810         }
1811         team->port_mtu_change_allowed = false;
1812         mutex_unlock(&team->lock);
1813
1814         dev->mtu = new_mtu;
1815
1816         return 0;
1817
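/*
 * Roll back: restore the old MTU on every port that was already changed
 * before reporting the error.
 */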
1818 unwind:
1819         list_for_each_entry_continue_reverse(port, &team->port_list, list)
1820                 dev_set_mtu(port->dev, dev->mtu);
1821         team->port_mtu_change_allowed = false;
1822         mutex_unlock(&team->lock);
1823
1824         return err;
1825 }
1826
1827 static void
1828 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1829 {
1830         struct team *team = netdev_priv(dev);
1831         struct team_pcpu_stats *p;
1832         u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1833         u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
1834         unsigned int start;
1835         int i;
1836
1837         for_each_possible_cpu(i) {
1838                 p = per_cpu_ptr(team->pcpu_stats, i);
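                /*
                 * Seqcount retry loop: re-read the counters if the writer
                 * side updated them mid-read, so each 64-bit value is a
                 * consistent snapshot.
                 */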
1839                 do {
1840                         start = u64_stats_fetch_begin_irq(&p->syncp);
1841                         rx_packets      = p->rx_packets;
1842                         rx_bytes        = p->rx_bytes;
1843                         rx_multicast    = p->rx_multicast;
1844                         tx_packets      = p->tx_packets;
1845                         tx_bytes        = p->tx_bytes;
1846                 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1847
1848                 stats->rx_packets       += rx_packets;
1849                 stats->rx_bytes         += rx_bytes;
1850                 stats->multicast        += rx_multicast;
1851                 stats->tx_packets       += tx_packets;
1852                 stats->tx_bytes         += tx_bytes;
1853                 /*
1854                  * rx_dropped, tx_dropped & rx_nohandler are u32,
1855                  * updated without syncp protection.
1856                  */
1857                 rx_dropped      += p->rx_dropped;
1858                 tx_dropped      += p->tx_dropped;
1859                 rx_nohandler    += p->rx_nohandler;
1860         }
1861         stats->rx_dropped       = rx_dropped;
1862         stats->tx_dropped       = tx_dropped;
1863         stats->rx_nohandler     = rx_nohandler;
1864 }
1865
1866 static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1867 {
1868         struct team *team = netdev_priv(dev);
1869         struct team_port *port;
1870         int err;
1871
1872         /*
1873          * Although this is a reader, it's guarded by the team lock. It's not
1874          * possible to traverse the list in reverse under rcu_read_lock.
1875          */
1876         mutex_lock(&team->lock);
1877         list_for_each_entry(port, &team->port_list, list) {
1878                 err = vlan_vid_add(port->dev, proto, vid);
1879                 if (err)
1880                         goto unwind;
1881         }
1882         mutex_unlock(&team->lock);
1883
1884         return 0;
1885
1886 unwind:
1887         list_for_each_entry_continue_reverse(port, &team->port_list, list)
1888                 vlan_vid_del(port->dev, proto, vid);
1889         mutex_unlock(&team->lock);
1890
1891         return err;
1892 }
1893
1894 static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1895 {
1896         struct team *team = netdev_priv(dev);
1897         struct team_port *port;
1898
1899         mutex_lock(&team->lock);
1900         list_for_each_entry(port, &team->port_list, list)
1901                 vlan_vid_del(port->dev, proto, vid);
1902         mutex_unlock(&team->lock);
1903
1904         return 0;
1905 }
1906
1907 #ifdef CONFIG_NET_POLL_CONTROLLER
1908 static void team_poll_controller(struct net_device *dev)
1909 {
1910 }
1911
1912 static void __team_netpoll_cleanup(struct team *team)
1913 {
1914         struct team_port *port;
1915
1916         list_for_each_entry(port, &team->port_list, list)
1917                 team_port_disable_netpoll(port);
1918 }
1919
1920 static void team_netpoll_cleanup(struct net_device *dev)
1921 {
1922         struct team *team = netdev_priv(dev);
1923
1924         mutex_lock(&team->lock);
1925         __team_netpoll_cleanup(team);
1926         mutex_unlock(&team->lock);
1927 }
1928
1929 static int team_netpoll_setup(struct net_device *dev,
1930                               struct netpoll_info *npinfo)
1931 {
1932         struct team *team = netdev_priv(dev);
1933         struct team_port *port;
1934         int err = 0;
1935
1936         mutex_lock(&team->lock);
1937         list_for_each_entry(port, &team->port_list, list) {
1938                 err = __team_port_enable_netpoll(port);
1939                 if (err) {
1940                         __team_netpoll_cleanup(team);
1941                         break;
1942                 }
1943         }
1944         mutex_unlock(&team->lock);
1945         return err;
1946 }
1947 #endif
1948
1949 static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
1950                           struct netlink_ext_ack *extack)
1951 {
1952         struct team *team = netdev_priv(dev);
1953         int err;
1954
1955         mutex_lock(&team->lock);
1956         err = team_port_add(team, port_dev, extack);
1957         mutex_unlock(&team->lock);
1958
1959         if (!err)
1960                 netdev_change_features(dev);
1961
1962         return err;
1963 }
1964
1965 static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1966 {
1967         struct team *team = netdev_priv(dev);
1968         int err;
1969
1970         mutex_lock(&team->lock);
1971         err = team_port_del(team, port_dev);
1972         mutex_unlock(&team->lock);
1973
1974         if (!err)
1975                 netdev_change_features(dev);
1976
1977         return err;
1978 }
1979
1980 static netdev_features_t team_fix_features(struct net_device *dev,
1981                                            netdev_features_t features)
1982 {
1983         struct team_port *port;
1984         struct team *team = netdev_priv(dev);
1985         netdev_features_t mask;
1986
1987         mask = features;
1988         features &= ~NETIF_F_ONE_FOR_ALL;
1989         features |= NETIF_F_ALL_FOR_ALL;
1990
1991         rcu_read_lock();
1992         list_for_each_entry_rcu(port, &team->port_list, list) {
1993                 features = netdev_increment_features(features,
1994                                                      port->dev->features,
1995                                                      mask);
1996         }
1997         rcu_read_unlock();
1998
1999         features = netdev_add_tso_features(features, mask);
2000
2001         return features;
2002 }
2003
2004 static int team_change_carrier(struct net_device *dev, bool new_carrier)
2005 {
2006         struct team *team = netdev_priv(dev);
2007
2008         team->user_carrier_enabled = true;
2009
2010         if (new_carrier)
2011                 netif_carrier_on(dev);
2012         else
2013                 netif_carrier_off(dev);
2014         return 0;
2015 }
2016
2017 static const struct net_device_ops team_netdev_ops = {
2018         .ndo_init               = team_init,
2019         .ndo_uninit             = team_uninit,
2020         .ndo_open               = team_open,
2021         .ndo_stop               = team_close,
2022         .ndo_start_xmit         = team_xmit,
2023         .ndo_select_queue       = team_select_queue,
2024         .ndo_change_rx_flags    = team_change_rx_flags,
2025         .ndo_set_rx_mode        = team_set_rx_mode,
2026         .ndo_set_mac_address    = team_set_mac_address,
2027         .ndo_change_mtu         = team_change_mtu,
2028         .ndo_get_stats64        = team_get_stats64,
2029         .ndo_vlan_rx_add_vid    = team_vlan_rx_add_vid,
2030         .ndo_vlan_rx_kill_vid   = team_vlan_rx_kill_vid,
2031 #ifdef CONFIG_NET_POLL_CONTROLLER
2032         .ndo_poll_controller    = team_poll_controller,
2033         .ndo_netpoll_setup      = team_netpoll_setup,
2034         .ndo_netpoll_cleanup    = team_netpoll_cleanup,
2035 #endif
2036         .ndo_add_slave          = team_add_slave,
2037         .ndo_del_slave          = team_del_slave,
2038         .ndo_fix_features       = team_fix_features,
2039         .ndo_change_carrier     = team_change_carrier,
2040         .ndo_features_check     = passthru_features_check,
2041 };
2042
2043 /***********************
2044  * ethtool interface
2045  ***********************/
2046
2047 static void team_ethtool_get_drvinfo(struct net_device *dev,
2048                                      struct ethtool_drvinfo *drvinfo)
2049 {
2050         strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2051         strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
2052 }
2053
2054 static const struct ethtool_ops team_ethtool_ops = {
2055         .get_drvinfo            = team_ethtool_get_drvinfo,
2056         .get_link               = ethtool_op_get_link,
2057 };
2058
2059 /***********************
2060  * rt netlink interface
2061  ***********************/
2062
2063 static void team_setup_by_port(struct net_device *dev,
2064                                struct net_device *port_dev)
2065 {
2066         dev->header_ops = port_dev->header_ops;
2067         dev->type = port_dev->type;
2068         dev->hard_header_len = port_dev->hard_header_len;
2069         dev->addr_len = port_dev->addr_len;
2070         dev->mtu = port_dev->mtu;
2071         memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
2072         eth_hw_addr_inherit(dev, port_dev);
2073 }
2074
2075 static int team_dev_type_check_change(struct net_device *dev,
2076                                       struct net_device *port_dev)
2077 {
2078         struct team *team = netdev_priv(dev);
2079         char *portname = port_dev->name;
2080         int err;
2081
2082         if (dev->type == port_dev->type)
2083                 return 0;
2084         if (!list_empty(&team->port_list)) {
2085                 netdev_err(dev, "Device %s is of different type\n", portname);
2086                 return -EBUSY;
2087         }
2088         err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
2089         err = notifier_to_errno(err);
2090         if (err) {
2091                 netdev_err(dev, "Refused to change device type\n");
2092                 return err;
2093         }
2094         dev_uc_flush(dev);
2095         dev_mc_flush(dev);
2096         team_setup_by_port(dev, port_dev);
2097         call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
2098         return 0;
2099 }
2100
2101 static void team_setup(struct net_device *dev)
2102 {
2103         ether_setup(dev);
2104         dev->max_mtu = ETH_MAX_MTU;
2105
2106         dev->netdev_ops = &team_netdev_ops;
2107         dev->ethtool_ops = &team_ethtool_ops;
2108         dev->needs_free_netdev = true;
2109         dev->priv_destructor = team_destructor;
2110         dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2111         dev->priv_flags |= IFF_NO_QUEUE;
2112         dev->priv_flags |= IFF_TEAM;
2113
2114         /*
2115          * Indicate we support unicast address filtering. That way the core
2116          * won't put us into promiscuous mode when a unicast address is added.
2117          * Leave this up to the underlying drivers.
2118          */
2119         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2120
2121         dev->features |= NETIF_F_LLTX;
2122         dev->features |= NETIF_F_GRO;
2123
2124         /* Don't allow team devices to change network namespaces. */
2125         dev->features |= NETIF_F_NETNS_LOCAL;
2126
2127         dev->hw_features = TEAM_VLAN_FEATURES |
2128                            NETIF_F_HW_VLAN_CTAG_TX |
2129                            NETIF_F_HW_VLAN_CTAG_RX |
2130                            NETIF_F_HW_VLAN_CTAG_FILTER;
2131
2132         dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
2133         dev->features |= dev->hw_features;
2134 }
2135
2136 static int team_newlink(struct net *src_net, struct net_device *dev,
2137                         struct nlattr *tb[], struct nlattr *data[],
2138                         struct netlink_ext_ack *extack)
2139 {
2140         if (tb[IFLA_ADDRESS] == NULL)
2141                 eth_hw_addr_random(dev);
2142
2143         return register_netdevice(dev);
2144 }
2145
2146 static int team_validate(struct nlattr *tb[], struct nlattr *data[],
2147                          struct netlink_ext_ack *extack)
2148 {
2149         if (tb[IFLA_ADDRESS]) {
2150                 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
2151                         return -EINVAL;
2152                 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
2153                         return -EADDRNOTAVAIL;
2154         }
2155         return 0;
2156 }
2157
2158 static unsigned int team_get_num_tx_queues(void)
2159 {
2160         return TEAM_DEFAULT_NUM_TX_QUEUES;
2161 }
2162
2163 static unsigned int team_get_num_rx_queues(void)
2164 {
2165         return TEAM_DEFAULT_NUM_RX_QUEUES;
2166 }
2167
2168 static struct rtnl_link_ops team_link_ops __read_mostly = {
2169         .kind                   = DRV_NAME,
2170         .priv_size              = sizeof(struct team),
2171         .setup                  = team_setup,
2172         .newlink                = team_newlink,
2173         .validate               = team_validate,
2174         .get_num_tx_queues      = team_get_num_tx_queues,
2175         .get_num_rx_queues      = team_get_num_rx_queues,
2176 };
2177
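/*
 * With team_link_ops registered, a team device can be driven straight from
 * iproute2 (illustrative; teamd(8) is the usual frontend):
 *
 *	ip link add name team0 type team
 *	ip link set eth0 master team0		# ndo_add_slave above
 *	ip link set eth0 nomaster		# ndo_del_slave
 */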
2178
2179 /***********************************
2180  * Generic netlink custom interface
2181  ***********************************/
2182
2183 static struct genl_family team_nl_family;
2184
2185 static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
2186         [TEAM_ATTR_UNSPEC]                      = { .type = NLA_UNSPEC, },
2187         [TEAM_ATTR_TEAM_IFINDEX]                = { .type = NLA_U32 },
2188         [TEAM_ATTR_LIST_OPTION]                 = { .type = NLA_NESTED },
2189         [TEAM_ATTR_LIST_PORT]                   = { .type = NLA_NESTED },
2190 };
2191
2192 static const struct nla_policy
2193 team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
2194         [TEAM_ATTR_OPTION_UNSPEC]               = { .type = NLA_UNSPEC, },
2195         [TEAM_ATTR_OPTION_NAME] = {
2196                 .type = NLA_STRING,
2197                 .len = TEAM_STRING_MAX_LEN,
2198         },
2199         [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
2200         [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
2201         [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
2202 };
2203
2204 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
2205 {
2206         struct sk_buff *msg;
2207         void *hdr;
2208         int err;
2209
2210         msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2211         if (!msg)
2212                 return -ENOMEM;
2213
2214         hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
2215                           &team_nl_family, 0, TEAM_CMD_NOOP);
2216         if (!hdr) {
2217                 err = -EMSGSIZE;
2218                 goto err_msg_put;
2219         }
2220
2221         genlmsg_end(msg, hdr);
2222
2223         return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
2224
2225 err_msg_put:
2226         nlmsg_free(msg);
2227
2228         return err;
2229 }
2230
2231 /*
2232  * Netlink cmd functions should be guarded by the following two functions.
2233  * Since dev is held here, it is guaranteed not to disappear in between.
2234  */
2235 static struct team *team_nl_team_get(struct genl_info *info)
2236 {
2237         struct net *net = genl_info_net(info);
2238         int ifindex;
2239         struct net_device *dev;
2240         struct team *team;
2241
2242         if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
2243                 return NULL;
2244
2245         ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
2246         dev = dev_get_by_index(net, ifindex);
2247         if (!dev || dev->netdev_ops != &team_netdev_ops) {
2248                 if (dev)
2249                         dev_put(dev);
2250                 return NULL;
2251         }
2252
2253         team = netdev_priv(dev);
2254         mutex_lock(&team->lock);
2255         return team;
2256 }
2257
2258 static void team_nl_team_put(struct team *team)
2259 {
2260         mutex_unlock(&team->lock);
2261         dev_put(team->dev);
2262 }
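
/*
 * Typical usage of the pair above (sketch):
 *
 *	team = team_nl_team_get(info);
 *	if (!team)
 *		return -EINVAL;
 *	... operate on team with team->lock held and team->dev referenced ...
 *	team_nl_team_put(team);
 */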
2263
2264 typedef int team_nl_send_func_t(struct sk_buff *skb,
2265                                 struct team *team, u32 portid);
2266
2267 static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
2268 {
2269         return genlmsg_unicast(dev_net(team->dev), skb, portid);
2270 }
2271
2272 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
2273                                        struct team_option_inst *opt_inst)
2274 {
2275         struct nlattr *option_item;
2276         struct team_option *option = opt_inst->option;
2277         struct team_option_inst_info *opt_inst_info = &opt_inst->info;
2278         struct team_gsetter_ctx ctx;
2279         int err;
2280
2281         ctx.info = opt_inst_info;
2282         err = team_option_get(team, opt_inst, &ctx);
2283         if (err)
2284                 return err;
2285
2286         option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
2287         if (!option_item)
2288                 return -EMSGSIZE;
2289
2290         if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
2291                 goto nest_cancel;
2292         if (opt_inst_info->port &&
2293             nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
2294                         opt_inst_info->port->dev->ifindex))
2295                 goto nest_cancel;
2296         if (opt_inst->option->array_size &&
2297             nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
2298                         opt_inst_info->array_index))
2299                 goto nest_cancel;
2300
2301         switch (option->type) {
2302         case TEAM_OPTION_TYPE_U32:
2303                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
2304                         goto nest_cancel;
2305                 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
2306                         goto nest_cancel;
2307                 break;
2308         case TEAM_OPTION_TYPE_STRING:
2309                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
2310                         goto nest_cancel;
2311                 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
2312                                    ctx.data.str_val))
2313                         goto nest_cancel;
2314                 break;
2315         case TEAM_OPTION_TYPE_BINARY:
2316                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
2317                         goto nest_cancel;
2318                 if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
2319                             ctx.data.bin_val.ptr))
2320                         goto nest_cancel;
2321                 break;
2322         case TEAM_OPTION_TYPE_BOOL:
2323                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
2324                         goto nest_cancel;
2325                 if (ctx.data.bool_val &&
2326                     nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
2327                         goto nest_cancel;
2328                 break;
2329         case TEAM_OPTION_TYPE_S32:
2330                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2331                         goto nest_cancel;
2332                 if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2333                         goto nest_cancel;
2334                 break;
2335         default:
2336                 BUG();
2337         }
2338         if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
2339                 goto nest_cancel;
2340         if (opt_inst->changed) {
2341                 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
2342                         goto nest_cancel;
2343                 opt_inst->changed = false;
2344         }
2345         nla_nest_end(skb, option_item);
2346         return 0;
2347
2348 nest_cancel:
2349         nla_nest_cancel(skb, option_item);
2350         return -EMSGSIZE;
2351 }
2352
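/*
 * Flush the current skb via send_func (if there is one) and allocate a
 * fresh one, letting the multipart senders below spill a dump over as
 * many messages as it needs.
 */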
2353 static int __send_and_alloc_skb(struct sk_buff **pskb,
2354                                 struct team *team, u32 portid,
2355                                 team_nl_send_func_t *send_func)
2356 {
2357         int err;
2358
2359         if (*pskb) {
2360                 err = send_func(*pskb, team, portid);
2361                 if (err)
2362                         return err;
2363         }
2364         *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
2365         if (!*pskb)
2366                 return -ENOMEM;
2367         return 0;
2368 }
2369
2370 static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
2371                                     int flags, team_nl_send_func_t *send_func,
2372                                     struct list_head *sel_opt_inst_list)
2373 {
2374         struct nlattr *option_list;
2375         struct nlmsghdr *nlh;
2376         void *hdr;
2377         struct team_option_inst *opt_inst;
2378         int err;
2379         struct sk_buff *skb = NULL;
2380         bool incomplete;
2381         int i;
2382
2383         opt_inst = list_first_entry(sel_opt_inst_list,
2384                                     struct team_option_inst, tmp_list);
2385
2386 start_again:
2387         err = __send_and_alloc_skb(&skb, team, portid, send_func);
2388         if (err)
2389                 return err;
2390
2391         hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2392                           TEAM_CMD_OPTIONS_GET);
2393         if (!hdr) {
2394                 nlmsg_free(skb);
2395                 return -EMSGSIZE;
2396         }
2397
2398         if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2399                 goto nla_put_failure;
2400         option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
2401         if (!option_list)
2402                 goto nla_put_failure;
2403
2404         i = 0;
2405         incomplete = false;
2406         list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
2407                 err = team_nl_fill_one_option_get(skb, team, opt_inst);
2408                 if (err) {
2409                         if (err == -EMSGSIZE) {
2410                                 if (!i)
2411                                         goto errout;
2412                                 incomplete = true;
2413                                 break;
2414                         }
2415                         goto errout;
2416                 }
2417                 i++;
2418         }
2419
2420         nla_nest_end(skb, option_list);
2421         genlmsg_end(skb, hdr);
2422         if (incomplete)
2423                 goto start_again;
2424
2425 send_done:
2426         nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2427         if (!nlh) {
2428                 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2429                 if (err)
2430                         return err;
2431                 goto send_done;
2432         }
2433
2434         return send_func(skb, team, portid);
2435
2436 nla_put_failure:
2437         err = -EMSGSIZE;
2438 errout:
2439         nlmsg_free(skb);
2440         return err;
2441 }
2442
2443 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
2444 {
2445         struct team *team;
2446         struct team_option_inst *opt_inst;
2447         int err;
2448         LIST_HEAD(sel_opt_inst_list);
2449
2450         team = team_nl_team_get(info);
2451         if (!team)
2452                 return -EINVAL;
2453
2454         list_for_each_entry(opt_inst, &team->option_inst_list, list)
2455                 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2456         err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
2457                                        NLM_F_ACK, team_nl_send_unicast,
2458                                        &sel_opt_inst_list);
2459
2460         team_nl_team_put(team);
2461
2462         return err;
2463 }
2464
2465 static int team_nl_send_event_options_get(struct team *team,
2466                                           struct list_head *sel_opt_inst_list);
2467
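/*
 * Set one or more options from a TEAM_ATTR_LIST_OPTION nest: each item is
 * parsed against team_nl_option_policy, matched by name, type, port ifindex
 * and array index against the registered option instances, applied via
 * team_option_set() and then announced back as a change event.
 */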
2468 static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2469 {
2470         struct team *team;
2471         int err = 0;
2472         int i;
2473         struct nlattr *nl_option;
2474
2475         rtnl_lock();
2476
2477         team = team_nl_team_get(info);
2478         if (!team) {
2479                 err = -EINVAL;
2480                 goto rtnl_unlock;
2481         }
2482
2483         err = -EINVAL;
2484         if (!info->attrs[TEAM_ATTR_LIST_OPTION])
2485                 goto team_put;
2488
2489         nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
2490                 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2491                 struct nlattr *attr;
2492                 struct nlattr *attr_data;
2493                 LIST_HEAD(opt_inst_list);
2494                 enum team_option_type opt_type;
2495                 int opt_port_ifindex = 0; /* != 0 for per-port options */
2496                 u32 opt_array_index = 0;
2497                 bool opt_is_array = false;
2498                 struct team_option_inst *opt_inst;
2499                 char *opt_name;
2500                 bool opt_found = false;
2501
2502                 if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
2503                         err = -EINVAL;
2504                         goto team_put;
2505                 }
2506                 err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
2507                                        nl_option, team_nl_option_policy,
2508                                        info->extack);
2509                 if (err)
2510                         goto team_put;
2511                 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
2512                     !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
2513                         err = -EINVAL;
2514                         goto team_put;
2515                 }
2516                 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
2517                 case NLA_U32:
2518                         opt_type = TEAM_OPTION_TYPE_U32;
2519                         break;
2520                 case NLA_STRING:
2521                         opt_type = TEAM_OPTION_TYPE_STRING;
2522                         break;
2523                 case NLA_BINARY:
2524                         opt_type = TEAM_OPTION_TYPE_BINARY;
2525                         break;
2526                 case NLA_FLAG:
2527                         opt_type = TEAM_OPTION_TYPE_BOOL;
2528                         break;
2529                 case NLA_S32:
2530                         opt_type = TEAM_OPTION_TYPE_S32;
2531                         break;
2532                 default:
                             err = -EINVAL;
2533                         goto team_put;
2534                 }
2535
2536                 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
2537                 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
2538                         err = -EINVAL;
2539                         goto team_put;
2540                 }
2541
2542                 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
2543                 attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
2544                 if (attr)
2545                         opt_port_ifindex = nla_get_u32(attr);
2546
2547                 attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
2548                 if (attr) {
2549                         opt_is_array = true;
2550                         opt_array_index = nla_get_u32(attr);
2551                 }
2552
2553                 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2554                         struct team_option *option = opt_inst->option;
2555                         struct team_gsetter_ctx ctx;
2556                         struct team_option_inst_info *opt_inst_info;
2557                         int tmp_ifindex;
2558
2559                         opt_inst_info = &opt_inst->info;
2560                         tmp_ifindex = opt_inst_info->port ?
2561                                       opt_inst_info->port->dev->ifindex : 0;
2562                         if (option->type != opt_type ||
2563                             strcmp(option->name, opt_name) ||
2564                             tmp_ifindex != opt_port_ifindex ||
2565                             (option->array_size && !opt_is_array) ||
2566                             opt_inst_info->array_index != opt_array_index)
2567                                 continue;
2568                         opt_found = true;
2569                         ctx.info = opt_inst_info;
2570                         switch (opt_type) {
2571                         case TEAM_OPTION_TYPE_U32:
2572                                 ctx.data.u32_val = nla_get_u32(attr_data);
2573                                 break;
2574                         case TEAM_OPTION_TYPE_STRING:
2575                                 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
2576                                         err = -EINVAL;
2577                                         goto team_put;
2578                                 }
2579                                 ctx.data.str_val = nla_data(attr_data);
2580                                 break;
2581                         case TEAM_OPTION_TYPE_BINARY:
2582                                 ctx.data.bin_val.len = nla_len(attr_data);
2583                                 ctx.data.bin_val.ptr = nla_data(attr_data);
2584                                 break;
2585                         case TEAM_OPTION_TYPE_BOOL:
2586                                 ctx.data.bool_val = attr_data ? true : false;
2587                                 break;
2588                         case TEAM_OPTION_TYPE_S32:
2589                                 ctx.data.s32_val = nla_get_s32(attr_data);
2590                                 break;
2591                         default:
2592                                 BUG();
2593                         }
2594                         err = team_option_set(team, opt_inst, &ctx);
2595                         if (err)
2596                                 goto team_put;
2597                         opt_inst->changed = true;
2598                         list_add(&opt_inst->tmp_list, &opt_inst_list);
2599                 }
2600                 if (!opt_found) {
2601                         err = -ENOENT;
2602                         goto team_put;
2603                 }
2604
2605                 err = team_nl_send_event_options_get(team, &opt_inst_list);
2606                 if (err)
2607                         break;
2608         }
2609
2610 team_put:
2611         team_nl_team_put(team);
2612 rtnl_unlock:
2613         rtnl_unlock();
2614         return err;
2615 }
2616
2617 static int team_nl_fill_one_port_get(struct sk_buff *skb,
2618                                      struct team_port *port)
2619 {
2620         struct nlattr *port_item;
2621
2622         port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
2623         if (!port_item)
2624                 goto nest_cancel;
2625         if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2626                 goto nest_cancel;
2627         if (port->changed) {
2628                 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
2629                         goto nest_cancel;
2630                 port->changed = false;
2631         }
2632         if ((port->removed &&
2633              nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
2634             (port->state.linkup &&
2635              nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
2636             nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2637             nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2638                 goto nest_cancel;
2639         nla_nest_end(skb, port_item);
2640         return 0;
2641
2642 nest_cancel:
2643         nla_nest_cancel(skb, port_item);
2644         return -EMSGSIZE;
2645 }
2646
2647 static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2648                                       int flags, team_nl_send_func_t *send_func,
2649                                       struct team_port *one_port)
2650 {
2651         struct nlattr *port_list;
2652         struct nlmsghdr *nlh;
2653         void *hdr;
2654         struct team_port *port;
2655         int err;
2656         struct sk_buff *skb = NULL;
2657         bool incomplete;
2658         int i;
2659
2660         port = list_first_entry_or_null(&team->port_list,
2661                                         struct team_port, list);
2662
2663 start_again:
2664         err = __send_and_alloc_skb(&skb, team, portid, send_func);
2665         if (err)
2666                 return err;
2667
2668         hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2669                           TEAM_CMD_PORT_LIST_GET);
2670         if (!hdr) {
2671                 nlmsg_free(skb);
2672                 return -EMSGSIZE;
2673         }
2674
2675         if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2676                 goto nla_put_failure;
2677         port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
2678         if (!port_list)
2679                 goto nla_put_failure;
2680
2681         i = 0;
2682         incomplete = false;
2683
2684         /* If one port is selected, the caller wants to send a port list with
2685          * only this port. Otherwise go through all listed ports and send them all.
2686          */
2687         if (one_port) {
2688                 err = team_nl_fill_one_port_get(skb, one_port);
2689                 if (err)
2690                         goto errout;
2691         } else if (port) {
2692                 list_for_each_entry_from(port, &team->port_list, list) {
2693                         err = team_nl_fill_one_port_get(skb, port);
2694                         if (err) {
2695                                 if (err == -EMSGSIZE) {
2696                                         if (!i)
2697                                                 goto errout;
2698                                         incomplete = true;
2699                                         break;
2700                                 }
2701                                 goto errout;
2702                         }
2703                         i++;
2704                 }
2705         }
2706
2707         nla_nest_end(skb, port_list);
2708         genlmsg_end(skb, hdr);
2709         if (incomplete)
2710                 goto start_again;
2711
2712 send_done:
2713         nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2714         if (!nlh) {
2715                 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2716                 if (err)
2717                         return err;
2718                 goto send_done;
2719         }
2720
2721         return send_func(skb, team, portid);
2722
2723 nla_put_failure:
2724         err = -EMSGSIZE;
2725 errout:
2726         nlmsg_free(skb);
2727         return err;
2728 }
2729
2730 static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2731                                      struct genl_info *info)
2732 {
2733         struct team *team;
2734         int err;
2735
2736         team = team_nl_team_get(info);
2737         if (!team)
2738                 return -EINVAL;
2739
2740         err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
2741                                          NLM_F_ACK, team_nl_send_unicast, NULL);
2742
2743         team_nl_team_put(team);
2744
2745         return err;
2746 }
2747
2748 static const struct genl_ops team_nl_ops[] = {
2749         {
2750                 .cmd = TEAM_CMD_NOOP,
2751                 .doit = team_nl_cmd_noop,
2752         },
2753         {
2754                 .cmd = TEAM_CMD_OPTIONS_SET,
2755                 .doit = team_nl_cmd_options_set,
2756                 .flags = GENL_ADMIN_PERM,
2757         },
2758         {
2759                 .cmd = TEAM_CMD_OPTIONS_GET,
2760                 .doit = team_nl_cmd_options_get,
2761                 .flags = GENL_ADMIN_PERM,
2762         },
2763         {
2764                 .cmd = TEAM_CMD_PORT_LIST_GET,
2765                 .doit = team_nl_cmd_port_list_get,
2766                 .flags = GENL_ADMIN_PERM,
2767         },
2768 };
2769
2770 static const struct genl_multicast_group team_nl_mcgrps[] = {
2771         { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
2772 };
2773
2774 static struct genl_family team_nl_family __ro_after_init = {
2775         .name           = TEAM_GENL_NAME,
2776         .version        = TEAM_GENL_VERSION,
2777         .maxattr        = TEAM_ATTR_MAX,
2778         .policy = team_nl_policy,
2779         .netnsok        = true,
2780         .module         = THIS_MODULE,
2781         .ops            = team_nl_ops,
2782         .n_ops          = ARRAY_SIZE(team_nl_ops),
2783         .mcgrps         = team_nl_mcgrps,
2784         .n_mcgrps       = ARRAY_SIZE(team_nl_mcgrps),
2785 };
2786
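/*
 * Event paths reuse the GET message builders with portid 0 and this
 * multicast send function, so listeners on the change-event group receive
 * the same format as explicit GET replies.
 */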
2787 static int team_nl_send_multicast(struct sk_buff *skb,
2788                                   struct team *team, u32 portid)
2789 {
2790         return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
2791                                        skb, 0, 0, GFP_KERNEL);
2792 }
2793
2794 static int team_nl_send_event_options_get(struct team *team,
2795                                           struct list_head *sel_opt_inst_list)
2796 {
2797         return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2798                                         sel_opt_inst_list);
2799 }
2800
2801 static int team_nl_send_event_port_get(struct team *team,
2802                                        struct team_port *port)
2803 {
2804         return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
2805                                           port);
2806 }
2807
2808 static int __init team_nl_init(void)
2809 {
2810         return genl_register_family(&team_nl_family);
2811 }
2812
2813 static void team_nl_fini(void)
2814 {
2815         genl_unregister_family(&team_nl_family);
2816 }
2817
2818
2819 /******************
2820  * Change checkers
2821  ******************/
2822
2823 static void __team_options_change_check(struct team *team)
2824 {
2825         int err;
2826         struct team_option_inst *opt_inst;
2827         LIST_HEAD(sel_opt_inst_list);
2828
2829         list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2830                 if (opt_inst->changed)
2831                         list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2832         }
2833         err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2834         if (err && err != -ESRCH)
2835                 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2836                             err);
2837 }
2838
2839 /* rtnl lock is held */
2841 static void __team_port_change_send(struct team_port *port, bool linkup)
2842 {
2843         int err;
2844
2845         port->changed = true;
2846         port->state.linkup = linkup;
2847         team_refresh_port_linkup(port);
2848         if (linkup) {
2849                 struct ethtool_link_ksettings ecmd;
2850
2851                 err = __ethtool_get_link_ksettings(port->dev, &ecmd);
2852                 if (!err) {
2853                         port->state.speed = ecmd.base.speed;
2854                         port->state.duplex = ecmd.base.duplex;
2855                         goto send_event;
2856                 }
2857         }
2858         port->state.speed = 0;
2859         port->state.duplex = 0;
2860
2861 send_event:
2862         err = team_nl_send_event_port_get(port->team, port);
2863         if (err && err != -ESRCH)
2864                 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2865                             port->dev->name, err);
2866
2867 }
2868
2869 static void __team_carrier_check(struct team *team)
2870 {
2871         struct team_port *port;
2872         bool team_linkup;
2873
2874         if (team->user_carrier_enabled)
2875                 return;
2876
2877         team_linkup = false;
2878         list_for_each_entry(port, &team->port_list, list) {
2879                 if (port->linkup) {
2880                         team_linkup = true;
2881                         break;
2882                 }
2883         }
2884
2885         if (team_linkup)
2886                 netif_carrier_on(team->dev);
2887         else
2888                 netif_carrier_off(team->dev);
2889 }
2890
2891 static void __team_port_change_check(struct team_port *port, bool linkup)
2892 {
2893         if (port->state.linkup != linkup)
2894                 __team_port_change_send(port, linkup);
2895         __team_carrier_check(port->team);
2896 }
2897
2898 static void __team_port_change_port_added(struct team_port *port, bool linkup)
2899 {
2900         __team_port_change_send(port, linkup);
2901         __team_carrier_check(port->team);
2902 }
2903
2904 static void __team_port_change_port_removed(struct team_port *port)
2905 {
2906         port->removed = true;
2907         __team_port_change_send(port, false);
2908         __team_carrier_check(port->team);
2909 }
2910
2911 static void team_port_change_check(struct team_port *port, bool linkup)
2912 {
2913         struct team *team = port->team;
2914
2915         mutex_lock(&team->lock);
2916         __team_port_change_check(port, linkup);
2917         mutex_unlock(&team->lock);
2918 }
2919
2920
2921 /************************************
2922  * Net device notifier event handler
2923  ************************************/
2924
2925 static int team_device_event(struct notifier_block *unused,
2926                              unsigned long event, void *ptr)
2927 {
2928         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2929         struct team_port *port;
2930
2931         port = team_port_get_rtnl(dev);
2932         if (!port)
2933                 return NOTIFY_DONE;
2934
2935         switch (event) {
2936         case NETDEV_UP:
2937                 if (netif_oper_up(dev))
2938                         team_port_change_check(port, true);
2939                 break;
2940         case NETDEV_DOWN:
2941                 team_port_change_check(port, false);
2942                 break;
2943         case NETDEV_CHANGE:
2944                 if (netif_running(port->dev))
2945                         team_port_change_check(port,
2946                                                !!netif_oper_up(port->dev));
2947                 break;
2948         case NETDEV_UNREGISTER:
2949                 team_del_slave(port->team->dev, dev);
2950                 break;
2951         case NETDEV_FEAT_CHANGE:
2952                 team_compute_features(port->team);
2953                 break;
2954         case NETDEV_PRECHANGEMTU:
2955                 /* Forbid changing the MTU of the underlying device */
2956                 if (!port->team->port_mtu_change_allowed)
2957                         return NOTIFY_BAD;
2958                 break;
2959         case NETDEV_PRE_TYPE_CHANGE:
2960                 /* Forbid changing the type of the underlying device */
2961                 return NOTIFY_BAD;
2962         case NETDEV_RESEND_IGMP:
2963                 /* Propagate to master device */
2964                 call_netdevice_notifiers(event, port->team->dev);
2965                 break;
2966         }
2967         return NOTIFY_DONE;
2968 }
2969
2970 static struct notifier_block team_notifier_block __read_mostly = {
2971         .notifier_call = team_device_event,
2972 };
2973
2974
2975 /***********************
2976  * Module init and exit
2977  ***********************/
2978
2979 static int __init team_module_init(void)
2980 {
2981         int err;
2982
2983         register_netdevice_notifier(&team_notifier_block);
2984
2985         err = rtnl_link_register(&team_link_ops);
2986         if (err)
2987                 goto err_rtnl_reg;
2988
2989         err = team_nl_init();
2990         if (err)
2991                 goto err_nl_init;
2992
2993         return 0;
2994
2995 err_nl_init:
2996         rtnl_link_unregister(&team_link_ops);
2997
2998 err_rtnl_reg:
2999         unregister_netdevice_notifier(&team_notifier_block);
3000
3001         return err;
3002 }
3003
3004 static void __exit team_module_exit(void)
3005 {
3006         team_nl_fini();
3007         rtnl_link_unregister(&team_link_ops);
3008         unregister_netdevice_notifier(&team_notifier_block);
3009 }
3010
3011 module_init(team_module_init);
3012 module_exit(team_module_exit);
3013
3014 MODULE_LICENSE("GPL v2");
3015 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
3016 MODULE_DESCRIPTION("Ethernet team device driver");
3017 MODULE_ALIAS_RTNL_LINK(DRV_NAME);