/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

/**
 * switchdev_trans_item_enqueue - Enqueue data item to transaction queue
 *
 * @trans: transaction
 * @data: pointer to data being queued
 * @destructor: data destructor
 * @tritem: transaction item being queued
 *
 * Enqueue data item to transaction queue. tritem is typically placed in
 * container pointed at by data pointer. Destructor is called on
 * transaction abort and after successful commit phase in case
 * the caller did not dequeue the item before.
 */
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
				  void *data, void (*destructor)(void const *),
				  struct switchdev_trans_item *tritem)
{
	tritem->data = data;
	tritem->destructor = destructor;
	list_add_tail(&tritem->list, &trans->item_list);
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);

static struct switchdev_trans_item *
__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	if (list_empty(&trans->item_list))
		return NULL;
	tritem = list_first_entry(&trans->item_list,
				  struct switchdev_trans_item, list);
	list_del(&tritem->list);
	return tritem;
}

/**
 * switchdev_trans_item_dequeue - Dequeue data item from transaction queue
 *
 * @trans: transaction
 */
void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	tritem = __switchdev_trans_item_dequeue(trans);
	/* Dequeuing more items than were enqueued in the prepare phase
	 * is a driver bug.
	 */
	BUG_ON(!tritem);
	return tritem->data;
}
EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);

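/* A minimal usage sketch pairing enqueue and dequeue across the two
 * transaction phases. All foo_* names are hypothetical and exist only for
 * illustration; only the switchdev_trans_item_* calls are this file's API:
 *
 *	struct foo_vlan {
 *		struct switchdev_trans_item tritem;
 *		u16 vid;
 *	};
 *
 *	static void foo_vlan_destructor(const void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	static int foo_vlan_prepare(struct switchdev_trans *trans, u16 vid)
 *	{
 *		struct foo_vlan *vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
 *
 *		if (!vlan)
 *			return -ENOMEM;
 *		vlan->vid = vid;
 *		switchdev_trans_item_enqueue(trans, vlan, foo_vlan_destructor,
 *					     &vlan->tritem);
 *		return 0;
 *	}
 *
 *	static int foo_vlan_commit(struct switchdev_trans *trans)
 *	{
 *		struct foo_vlan *vlan = switchdev_trans_item_dequeue(trans);
 *
 *		... program vlan->vid into hardware ...
 *		kfree(vlan);
 *		return 0;
 *	}
 *
 * If the prepare phase fails, the destructor frees the queued item; once the
 * driver dequeues it in the commit phase, freeing becomes the driver's job.
 */
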
static void switchdev_trans_init(struct switchdev_trans *trans)
{
	INIT_LIST_HEAD(&trans->item_list);
}

static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
{
	struct switchdev_trans_item *tritem;

	while ((tritem = __switchdev_trans_item_dequeue(trans)))
		tritem->destructor(tritem->data);
}

static void switchdev_trans_items_warn_destroy(struct net_device *dev,
					       struct switchdev_trans *trans)
{
	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
	     dev->name);
	switchdev_trans_items_destroy(trans);
}

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[0];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 * switchdev_deferred_process - Process ops in deferred queue
 *
 * Called to flush the ops currently queued in deferred ops queue.
 * rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

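/* Sketch of the contract this enqueue path implies for callers within this
 * file: "data" is copied by value (data_len bytes trail the item), so it may
 * live on the caller's stack, and the device reference taken here is dropped
 * only after the deferred function has run. The foo_* names below are
 * hypothetical, for illustration only:
 *
 *	struct foo_req {
 *		u16 vid;
 *	};
 *
 *	static void foo_deferred(struct net_device *dev, const void *data)
 *	{
 *		const struct foo_req *req = data;
 *
 *		... runs later, under rtnl_lock, via the work item ...
 *	}
 *
 *	struct foo_req req = { .vid = 10 };
 *	int err = switchdev_deferred_enqueue(dev, &req, sizeof(req),
 *					     foo_deferred);
 */
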
/**
 * switchdev_port_attr_get - Get port attribute
 *
 * @dev: port device
 * @attr: attribute to get
 */
int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	struct switchdev_attr first = {
		.id = SWITCHDEV_ATTR_ID_UNDEFINED
	};
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_get)
		return ops->switchdev_port_attr_get(dev, attr);

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		return err;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to get attr on
	 * each port. Return -ENODATA if attr values don't
	 * compare across ports.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = switchdev_port_attr_get(lower_dev, attr);
		if (err)
			break;
		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
			first = *attr;
		else if (memcmp(&first, attr, sizeof(*attr)))
			return -ENODATA;
	}

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_get);

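/* Usage sketch: the common consumer of this getter is a parent-ID query.
 * The attribute ID and the u.ppid member are real; error handling beyond
 * the check shown is left to the caller:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 *	};
 *
 *	if (!switchdev_port_attr_get(dev, &attr))
 *		... attr.u.ppid now identifies the parent switch ...
 */
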
static int __switchdev_port_attr_set(struct net_device *dev,
				     const struct switchdev_attr *attr,
				     struct switchdev_trans *trans)
{
	const struct switchdev_ops *ops = dev->switchdev_ops;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (ops && ops->switchdev_port_attr_set) {
		err = ops->switchdev_port_attr_set(dev, attr, trans);
		goto done;
	}

	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
		goto done;

	/* Switch device port(s) may be stacked under
	 * bond/team/vlan dev, so recurse down to set attr on
	 * each port.
	 */

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_port_attr_set(lower_dev, attr, trans);
		if (err)
			break;
	}

done:
	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
		err = 0;

	return err;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr)
{
	struct switchdev_trans trans;
	int err;

	switchdev_trans_init(&trans);

	/* Phase I: prepare for attr set. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the attr.
	 */

	trans.ph_prepare = true;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	if (err) {
		/* Prepare phase failed: abort the transaction. Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit attr set. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = __switchdev_port_attr_set(dev, attr, &trans);
	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
	     dev->name, attr->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}

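/* What the two phases look like from the driver side, as a sketch. The
 * foo_* helpers are hypothetical; switchdev_trans_ph_prepare() is the real
 * accessor drivers use to distinguish the phases:
 *
 *	static int foo_port_attr_set(struct net_device *dev,
 *				     const struct switchdev_attr *attr,
 *				     struct switchdev_trans *trans)
 *	{
 *		if (switchdev_trans_ph_prepare(trans))
 *			return foo_check_and_reserve(dev, attr);
 *		return foo_apply(dev, attr);
 *	}
 */
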
static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);

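/* Usage sketch: a caller in atomic context (e.g. the bridge adjusting STP
 * state) marks the attribute deferred so the 2-phase set runs later under
 * rtnl_lock. The IDs and union member shown are real:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(dev, &attr);
 */
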
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct switchdev_trans *trans,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.trans = trans,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}

	if (!obj_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_obj_add_now(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack)
{
	struct switchdev_trans trans;
	int err;

	ASSERT_RTNL();

	switchdev_trans_init(&trans);

	/* Phase I: prepare for obj add. Driver/device should fail
	 * here if there are going to be issues in the commit phase,
	 * such as lack of resources or support. The driver/device
	 * should reserve resources needed for the commit phase here,
	 * but should not commit the obj.
	 */

	trans.ph_prepare = true;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	if (err) {
		/* Prepare phase failed: abort the transaction. Any
		 * resources reserved in the prepare phase are
		 * released.
		 */

		if (err != -EOPNOTSUPP)
			switchdev_trans_items_destroy(&trans);

		return err;
	}

	/* Phase II: commit obj add. This cannot fail as a fault
	 * of driver/device. If it does, it's a bug in the driver/device
	 * because the driver said everything was OK in phase I.
	 */

	trans.ph_prepare = false;
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, &trans, extack);
	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
	switchdev_trans_items_warn_destroy(dev, &trans);

	return err;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_add_now(dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 * @extack: netlink extended ack
 *
 * Use a 2-phase prepare-commit transaction model to ensure
 * system is not left in a partially updated state due to
 * failure from driver/device.
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_add_now(dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);

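/* Usage sketch: adding an untagged PVID VLAN to a port. The struct layout
 * matches struct switchdev_obj_port_vlan in this kernel version; the vid
 * values are arbitrary:
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid_begin = 10,
 *		.vid_end = 10,
 *	};
 *
 *	err = switchdev_port_obj_add(dev, &vlan.obj, extack);
 *
 * The matching removal goes through switchdev_port_obj_del(dev, &vlan.obj).
 */
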
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * rtnl_lock must be held and must not be in atomic section,
 * in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 *
 * Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info)
{
	info->dev = dev;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

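/* Usage sketch: a driver subscribing to the atomic chain for FDB events.
 * The event ID and the fdb_info container are real; the foo_* names are
 * hypothetical:
 *
 *	static int foo_switchdev_event(struct notifier_block *unused,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct switchdev_notifier_fdb_info *fdb_info = ptr;
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *			... schedule learning of fdb_info->addr/vid ...
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 */
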
int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

bool switchdev_port_same_parent_id(struct net_device *a,
				   struct net_device *b)
{
	struct switchdev_attr a_attr = {
		.orig_dev = a,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};
	struct switchdev_attr b_attr = {
		.orig_dev = b,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};

	if (switchdev_port_attr_get(a, &a_attr) ||
	    switchdev_port_attr_get(b, &b_attr))
		return false;

	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
}
EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);

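/* Usage sketch: callers typically use this to decide whether two ports can
 * offload a shared construct (e.g. bridging or LAG) in the same ASIC:
 *
 *	if (switchdev_port_same_parent_id(port_dev_a, port_dev_b))
 *		... both netdevs hang off the same physical switch ...
 */
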
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct switchdev_trans *trans,
				      struct netlink_ext_ack *extack))
{
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_obj_info->handled = true;
		return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
			      extack);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct switchdev_trans *trans,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);

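/* Usage sketch of this helper from a driver's blocking notifier. The foo_*
 * callbacks are hypothetical: check_cb says whether a netdev belongs to the
 * driver, add_cb performs the actual offload:
 *
 *	static int foo_port_obj_event(struct notifier_block *unused,
 *				      unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_PORT_OBJ_ADD:
 *			err = switchdev_handle_port_obj_add(dev, ptr,
 *							    foo_port_dev_check,
 *							    foo_port_obj_add);
 *			return notifier_from_errno(err);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */
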
static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		/* This flag is only checked if the return value is success. */
		port_obj_info->handled = true;
		return del_cb(dev, port_obj_info->obj);
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);