1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
26 struct mlxsw_sp_bridge_ops;
/* Per-ASIC bridge state: owning mlxsw_sp instance, FDB-notification work,
 * multicast-ID allocation bitmap and the list of offloaded bridge devices.
 * NOTE(review): some members are elided in this view — confirm against the
 * full file.
 */
28 struct mlxsw_sp_bridge {
29 struct mlxsw_sp *mlxsw_sp;
/* Delayed work, presumably the FDB learning-notification poller — TODO confirm */
31 struct delayed_work dw;
32 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
33 unsigned int interval; /* ms */
/* Ageing-time bounds accepted by mlxsw_sp_port_attr_br_ageing_set(), in seconds */
35 #define MLXSW_SP_MIN_AGEING_TIME 10
36 #define MLXSW_SP_MAX_AGEING_TIME 1000000
37 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
/* Only one VLAN-aware bridge may be offloaded at a time (see
 * mlxsw_sp_bridge_device_create())
 */
39 bool vlan_enabled_exists;
40 struct list_head bridges_list;
41 DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
/* Ops for 802.1Q (VLAN-aware) and 802.1D (VLAN-unaware) bridge models */
42 const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
43 const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
/* Represents one offloaded Linux bridge device: its netdev, membership in
 * bridge->bridges_list, its enslaved ports and multicast (MDB) entries, and
 * the model-specific ops (802.1Q vs 802.1D).
 */
46 struct mlxsw_sp_bridge_device {
47 struct net_device *dev;
48 struct list_head list;
49 struct list_head ports_list;
50 struct list_head mids_list;
54 const struct mlxsw_sp_bridge_ops *ops;
/* Represents one bridge port (a switch port or LAG enslaved to a bridge):
 * its netdev, back-pointer to the bridge device, the VLANs configured on it
 * and a reference count shared by the port-VLANs using it.
 */
57 struct mlxsw_sp_bridge_port {
58 struct net_device *dev;
59 struct mlxsw_sp_bridge_device *bridge_device;
60 struct list_head list;
61 struct list_head vlans_list;
62 unsigned int ref_count;
/* One VLAN on a bridge port; links the {port, VLAN} pairs
 * (mlxsw_sp_port_vlan) that are members of it via port_vlan_list.
 */
73 struct mlxsw_sp_bridge_vlan {
74 struct list_head list;
75 struct list_head port_vlan_list;
/* Bridge-model-specific operations (802.1Q vs 802.1D): port join/leave,
 * VXLAN join, and FID (filtering identifier) get/lookup/vid translation.
 */
79 struct mlxsw_sp_bridge_ops {
80 int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
81 struct mlxsw_sp_bridge_port *bridge_port,
82 struct mlxsw_sp_port *mlxsw_sp_port,
83 struct netlink_ext_ack *extack);
84 void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
85 struct mlxsw_sp_bridge_port *bridge_port,
86 struct mlxsw_sp_port *mlxsw_sp_port);
87 int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
88 const struct net_device *vxlan_dev, u16 vid,
89 struct netlink_ext_ack *extack);
/* fid_get takes a reference; fid_lookup does not — TODO confirm */
91 (*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
92 u16 vid, struct netlink_ext_ack *extack);
94 (*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
96 u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
97 const struct mlxsw_sp_fid *fid);
/* Forward declarations for helpers defined later in this file. */
101 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
102 struct mlxsw_sp_bridge_port *bridge_port,
106 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
107 struct mlxsw_sp_bridge_port *bridge_port);
110 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
111 struct mlxsw_sp_bridge_device
115 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
116 struct mlxsw_sp_bridge_port *bridge_port,
/* Look up the offloaded bridge device matching @br_dev by linear scan of
 * bridge->bridges_list; returns NULL when not offloaded.
 */
119 static struct mlxsw_sp_bridge_device *
120 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
121 const struct net_device *br_dev)
123 struct mlxsw_sp_bridge_device *bridge_device;
125 list_for_each_entry(bridge_device, &bridge->bridges_list, list)
126 if (bridge_device->dev == br_dev)
127 return bridge_device;
/* True iff @br_dev is currently offloaded by this mlxsw_sp instance. */
132 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
133 const struct net_device *br_dev)
135 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
/* netdev_walk_all_upper_dev_rcu() callback: destroy the router interface
 * (RIF) associated with each upper device of a bridge being torn down.
 */
138 static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
141 struct mlxsw_sp *mlxsw_sp = data;
143 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
/* Destroy the RIF of @dev itself and of all its upper devices (e.g. VLAN
 * uppers of the bridge), used when the bridge device is destroyed.
 */
147 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
148 struct net_device *dev)
150 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
151 netdev_walk_all_upper_dev_rcu(dev,
152 mlxsw_sp_bridge_device_upper_rif_destroy,
/* Allocate and register an offload record for bridge @br_dev.
 * Only a single VLAN-aware (802.1Q) bridge is supported per ASIC; a second
 * one is rejected with -EINVAL. Selects 802.1Q or 802.1D ops accordingly.
 * Returns the new device or an ERR_PTR.
 */
156 static struct mlxsw_sp_bridge_device *
157 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
158 struct net_device *br_dev)
160 struct device *dev = bridge->mlxsw_sp->bus_info->dev;
161 struct mlxsw_sp_bridge_device *bridge_device;
162 bool vlan_enabled = br_vlan_enabled(br_dev);
164 if (vlan_enabled && bridge->vlan_enabled_exists) {
165 dev_err(dev, "Only one VLAN-aware bridge is supported\n");
166 return ERR_PTR(-EINVAL);
169 bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
171 return ERR_PTR(-ENOMEM);
/* Snapshot current bridge attributes from the kernel bridge layer */
173 bridge_device->dev = br_dev;
174 bridge_device->vlan_enabled = vlan_enabled;
175 bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
176 bridge_device->mrouter = br_multicast_router(br_dev);
177 INIT_LIST_HEAD(&bridge_device->ports_list);
179 bridge->vlan_enabled_exists = true;
180 bridge_device->ops = bridge->bridge_8021q_ops;
182 bridge_device->ops = bridge->bridge_8021d_ops;
184 INIT_LIST_HEAD(&bridge_device->mids_list);
185 list_add(&bridge_device->list, &bridge->bridges_list);
187 return bridge_device;
/* Tear down an offloaded bridge: destroy its RIFs, unlink it, release the
 * VLAN-aware slot if held, and free it. Ports and MDB entries must already
 * be gone (WARN_ON otherwise).
 */
191 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
192 struct mlxsw_sp_bridge_device *bridge_device)
194 mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
196 list_del(&bridge_device->list);
197 if (bridge_device->vlan_enabled)
198 bridge->vlan_enabled_exists = false;
199 WARN_ON(!list_empty(&bridge_device->ports_list));
200 WARN_ON(!list_empty(&bridge_device->mids_list));
201 kfree(bridge_device);
/* Find-or-create: return the existing offload record for @br_dev, or create
 * a new one. May return an ERR_PTR from the create path.
 */
204 static struct mlxsw_sp_bridge_device *
205 mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
206 struct net_device *br_dev)
208 struct mlxsw_sp_bridge_device *bridge_device;
210 bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
212 return bridge_device;
214 return mlxsw_sp_bridge_device_create(bridge, br_dev);
/* Release a reference obtained by _get(): destroy the bridge device once no
 * ports remain enslaved to it.
 */
218 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
219 struct mlxsw_sp_bridge_device *bridge_device)
221 if (list_empty(&bridge_device->ports_list))
222 mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
/* Find the bridge port whose netdev is @brport_dev on the given bridge
 * device; NULL when not found.
 */
225 static struct mlxsw_sp_bridge_port *
226 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
227 const struct net_device *brport_dev)
229 struct mlxsw_sp_bridge_port *bridge_port;
231 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
232 if (bridge_port->dev == brport_dev)
/* Resolve @brport_dev's master bridge, then look the port up on that
 * bridge's offload record. Returns NULL if the bridge is not offloaded or
 * the port is not found.
 */
239 struct mlxsw_sp_bridge_port *
240 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
241 struct net_device *brport_dev)
243 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
244 struct mlxsw_sp_bridge_device *bridge_device;
249 bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
253 return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
/* Allocate a bridge-port record for @brport_dev, capturing whether the
 * underlying mlxsw port is a LAG member (and its LAG id or system port).
 * Initial state: STP disabled, default bridge-port flags, ref_count 1.
 */
256 static struct mlxsw_sp_bridge_port *
257 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
258 struct net_device *brport_dev)
260 struct mlxsw_sp_bridge_port *bridge_port;
261 struct mlxsw_sp_port *mlxsw_sp_port;
263 bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
267 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
268 bridge_port->lagged = mlxsw_sp_port->lagged;
269 if (bridge_port->lagged)
270 bridge_port->lag_id = mlxsw_sp_port->lag_id;
272 bridge_port->system_port = mlxsw_sp_port->local_port;
273 bridge_port->dev = brport_dev;
274 bridge_port->bridge_device = bridge_device;
275 bridge_port->stp_state = BR_STATE_DISABLED;
276 bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
278 INIT_LIST_HEAD(&bridge_port->vlans_list);
279 list_add(&bridge_port->list, &bridge_device->ports_list);
280 bridge_port->ref_count = 1;
/* Unlink and free a bridge port; all its VLANs must be gone by now. */
286 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
288 list_del(&bridge_port->list);
289 WARN_ON(!list_empty(&bridge_port->vlans_list));
/* Find-or-create with reference counting: bump ref_count on an existing
 * port, otherwise get (or create) the bridge device and create the port on
 * it. On port-creation failure the bridge-device reference is dropped.
 */
293 static struct mlxsw_sp_bridge_port *
294 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
295 struct net_device *brport_dev)
297 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
298 struct mlxsw_sp_bridge_device *bridge_device;
299 struct mlxsw_sp_bridge_port *bridge_port;
302 bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
304 bridge_port->ref_count++;
308 bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
309 if (IS_ERR(bridge_device))
310 return ERR_CAST(bridge_device);
312 bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
315 goto err_bridge_port_create;
320 err_bridge_port_create:
321 mlxsw_sp_bridge_device_put(bridge, bridge_device);
/* Drop a bridge-port reference; on the last one, destroy the port and
 * release its bridge-device reference.
 */
325 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
326 struct mlxsw_sp_bridge_port *bridge_port)
328 struct mlxsw_sp_bridge_device *bridge_device;
330 if (--bridge_port->ref_count != 0)
332 bridge_device = bridge_port->bridge_device;
333 mlxsw_sp_bridge_port_destroy(bridge_port);
334 mlxsw_sp_bridge_device_put(bridge, bridge_device);
/* Find the port-VLAN of @mlxsw_sp_port that belongs to @bridge_device.
 * For a VLAN-aware bridge the VID must also match; a VLAN-unaware bridge
 * has a single port-VLAN, so VID is ignored.
 */
337 static struct mlxsw_sp_port_vlan *
338 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
339 const struct mlxsw_sp_bridge_device *
343 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
345 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
347 if (!mlxsw_sp_port_vlan->bridge_port)
349 if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
352 if (bridge_device->vlan_enabled &&
353 mlxsw_sp_port_vlan->vid != vid)
355 return mlxsw_sp_port_vlan;
/* Find the port-VLAN mapped to the FID with index @fid_index; skips
 * port-VLANs with no FID assigned.
 */
361 static struct mlxsw_sp_port_vlan*
362 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
365 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
367 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
369 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
371 if (fid && mlxsw_sp_fid_index(fid) == fid_index)
372 return mlxsw_sp_port_vlan;
/* Find the bridge-VLAN entry with the given VID on a bridge port. */
378 static struct mlxsw_sp_bridge_vlan *
379 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
382 struct mlxsw_sp_bridge_vlan *bridge_vlan;
384 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
385 if (bridge_vlan->vid == vid)
/* Allocate a bridge-VLAN for @vid and link it onto the port's VLAN list. */
392 static struct mlxsw_sp_bridge_vlan *
393 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
395 struct mlxsw_sp_bridge_vlan *bridge_vlan;
397 bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
401 INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
402 bridge_vlan->vid = vid;
403 list_add(&bridge_vlan->list, &bridge_port->vlans_list);
/* Unlink and free a bridge-VLAN; no port-VLAN may still reference it. */
409 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
411 list_del(&bridge_vlan->list);
412 WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
/* Find-or-create the bridge-VLAN for @vid on @bridge_port. */
416 static struct mlxsw_sp_bridge_vlan *
417 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
419 struct mlxsw_sp_bridge_vlan *bridge_vlan;
421 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
425 return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
/* Destroy the bridge-VLAN once no port-VLANs are members of it. */
428 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
430 if (list_empty(&bridge_vlan->port_vlan_list))
431 mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
/* switchdev attr_get: report the bridge-port flags this driver can offload
 * (BR_LEARNING, BR_FLOOD, and presumably BR_MCAST_FLOOD — the tail of the
 * expression is elided in this view).
 */
434 static int mlxsw_sp_port_attr_get(struct net_device *dev,
435 struct switchdev_attr *attr)
438 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
439 attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
/* Apply STP @state to this port's member of @bridge_vlan; only the entry
 * whose mlxsw_sp_port matches is updated.
 */
450 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
451 struct mlxsw_sp_bridge_vlan *bridge_vlan,
454 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
456 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
458 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
460 return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
461 bridge_vlan->vid, state);
/* switchdev handler: set STP state on every VLAN of the bridge port,
 * rolling back already-updated VLANs to the previous state on failure.
 * Work is done only in the commit phase.
 */
467 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
468 struct switchdev_trans *trans,
469 struct net_device *orig_dev,
472 struct mlxsw_sp_bridge_port *bridge_port;
473 struct mlxsw_sp_bridge_vlan *bridge_vlan;
476 if (switchdev_trans_ph_prepare(trans))
479 /* It's possible we failed to enslave the port, yet this
480 * operation is executed due to it being deferred.
482 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
487 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
488 err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
491 goto err_port_bridge_vlan_stp_set;
494 bridge_port->stp_state = state;
/* Roll back: restore the old STP state on VLANs updated so far */
498 err_port_bridge_vlan_stp_set:
499 list_for_each_entry_continue_reverse(bridge_vlan,
500 &bridge_port->vlans_list, list)
501 mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
502 bridge_port->stp_state);
/* Set flood membership of this port in @bridge_vlan's FID for the given
 * packet type (UC/MC/BC); only the matching port entry is updated.
 */
507 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
508 struct mlxsw_sp_bridge_vlan *bridge_vlan,
509 enum mlxsw_sp_flood_type packet_type,
512 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
514 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
516 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
518 return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
520 mlxsw_sp_port->local_port,
/* Set flood membership for @packet_type on every VLAN of a bridge port,
 * undoing with the opposite membership (!member) on failure.
 */
528 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
529 struct mlxsw_sp_bridge_port *bridge_port,
530 enum mlxsw_sp_flood_type packet_type,
533 struct mlxsw_sp_bridge_vlan *bridge_vlan;
536 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
537 err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
542 goto err_port_bridge_vlan_flood_set;
547 err_port_bridge_vlan_flood_set:
548 list_for_each_entry_continue_reverse(bridge_vlan,
549 &bridge_port->vlans_list, list)
550 mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
551 packet_type, !member);
/* Enable/disable FDB learning for this port on @bridge_vlan's VID. */
556 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
557 struct mlxsw_sp_bridge_vlan *bridge_vlan,
560 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
561 u16 vid = bridge_vlan->vid;
563 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
565 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
567 return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
/* Enable/disable learning on every VLAN of a bridge port, rolling back the
 * already-updated VLANs on failure.
 */
574 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
575 struct mlxsw_sp_bridge_port *bridge_port,
578 struct mlxsw_sp_bridge_vlan *bridge_vlan;
581 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
582 err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
585 goto err_port_bridge_vlan_learning_set;
590 err_port_bridge_vlan_learning_set:
591 list_for_each_entry_continue_reverse(bridge_vlan,
592 &bridge_port->vlans_list, list)
593 mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
/* Prepare-phase check: reject any bridge-port flag the driver cannot
 * offload (only BR_LEARNING, BR_FLOOD and BR_MCAST_FLOOD are supported).
 */
598 static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
600 struct switchdev_trans *trans,
601 unsigned long brport_flags)
603 if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
/* Commit-phase handler for bridge-port flags: program UC flood, learning,
 * and (when the bridge's IGMP snooping is off) MC flood, then cache the
 * flags on the bridge port.
 */
609 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
610 struct switchdev_trans *trans,
611 struct net_device *orig_dev,
612 unsigned long brport_flags)
614 struct mlxsw_sp_bridge_port *bridge_port;
617 if (switchdev_trans_ph_prepare(trans))
620 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
625 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
626 MLXSW_SP_FLOOD_TYPE_UC,
627 brport_flags & BR_FLOOD);
631 err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
632 brport_flags & BR_LEARNING);
/* With multicast enabled, MC flooding is governed by mrouter state instead */
636 if (bridge_port->bridge_device->multicast_enabled)
639 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
640 MLXSW_SP_FLOOD_TYPE_MC,
647 memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
/* Program the FDB ageing time (seconds) into the device via the SFDAT
 * register and cache it on success.
 */
651 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
653 char sfdat_pl[MLXSW_REG_SFDAT_LEN];
656 mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
657 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
660 mlxsw_sp->bridge->ageing_time = ageing_time;
/* switchdev handler for bridge ageing time: convert clock_t -> seconds,
 * range-check in the prepare phase, program the device in commit.
 */
664 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
665 struct switchdev_trans *trans,
666 unsigned long ageing_clock_t)
668 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
669 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
670 u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
672 if (switchdev_trans_ph_prepare(trans)) {
673 if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
674 ageing_time > MLXSW_SP_MAX_AGEING_TIME)
680 return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
/* Prepare-phase check: VLAN filtering cannot be toggled on an already
 * offloaded bridge; only a no-op change is accepted.
 */
683 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
684 struct switchdev_trans *trans,
685 struct net_device *orig_dev,
688 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
689 struct mlxsw_sp_bridge_device *bridge_device;
691 if (!switchdev_trans_ph_prepare(trans))
694 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
695 if (WARN_ON(!bridge_device))
698 if (bridge_device->vlan_enabled == vlan_enabled)
701 netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
/* switchdev handler for the per-port mrouter flag: when IGMP snooping is
 * enabled, an mrouter port must receive all multicast (MC flood on) and be
 * added to/removed from existing MDB entries; finally cache the flag.
 */
705 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
706 struct switchdev_trans *trans,
707 struct net_device *orig_dev,
708 bool is_port_mrouter)
710 struct mlxsw_sp_bridge_port *bridge_port;
713 if (switchdev_trans_ph_prepare(trans))
716 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
721 if (!bridge_port->bridge_device->multicast_enabled)
724 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
725 MLXSW_SP_FLOOD_TYPE_MC,
730 mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
733 bridge_port->mrouter = is_port_mrouter;
/* Should unregistered multicast be flooded to this port? With IGMP
 * snooping enabled only mrouter ports flood; otherwise the BR_MCAST_FLOOD
 * flag decides.
 */
737 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
739 const struct mlxsw_sp_bridge_device *bridge_device;
741 bridge_device = bridge_port->bridge_device;
742 return bridge_device->multicast_enabled ? bridge_port->mrouter :
743 bridge_port->flags & BR_MCAST_FLOOD;
/* switchdev handler for bridge IGMP-snooping toggle: sync MDB entries to
 * the new mode, then recompute MC flood membership for every port of the
 * bridge and record the new state.
 */
746 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
747 struct switchdev_trans *trans,
748 struct net_device *orig_dev,
751 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
752 struct mlxsw_sp_bridge_device *bridge_device;
753 struct mlxsw_sp_bridge_port *bridge_port;
756 if (switchdev_trans_ph_prepare(trans))
759 /* It's possible we failed to enslave the port, yet this
760 * operation is executed due to it being deferred.
762 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
766 if (bridge_device->multicast_enabled != !mc_disabled) {
767 bridge_device->multicast_enabled = !mc_disabled;
768 mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
772 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
773 enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
774 bool member = mlxsw_sp_mc_flood(bridge_port);
776 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
778 packet_type, member);
783 bridge_device->multicast_enabled = !mc_disabled;
/* Add/remove the router port to/from the multicast group @mid_idx via the
 * SMID register. The register payload is heap-allocated (large buffer).
 */
788 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
789 u16 mid_idx, bool add)
794 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
798 mlxsw_reg_smid_pack(smid_pl, mid_idx,
799 mlxsw_sp_router_port(mlxsw_sp), add);
800 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
/* Add/remove the router port to/from every MDB entry of the bridge —
 * used when the bridge's own mrouter attribute changes.
 */
806 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
807 struct mlxsw_sp_bridge_device *bridge_device,
810 struct mlxsw_sp_mid *mid;
812 list_for_each_entry(mid, &bridge_device->mids_list, list)
813 mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
/* switchdev handler for the bridge-level mrouter attribute: update MDB
 * router-port membership when the value actually changes, then cache it.
 */
817 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
818 struct switchdev_trans *trans,
819 struct net_device *orig_dev,
822 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
823 struct mlxsw_sp_bridge_device *bridge_device;
825 if (switchdev_trans_ph_prepare(trans))
828 /* It's possible we failed to enslave the port, yet this
829 * operation is executed due to it being deferred.
831 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
835 if (bridge_device->mrouter != is_mrouter)
836 mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
838 bridge_device->mrouter = is_mrouter;
/* Top-level switchdev attribute dispatcher for a port netdev: route each
 * attribute ID to its handler, then re-resolve mirroring (SPAN) in the
 * commit phase since the config change may affect it.
 */
842 static int mlxsw_sp_port_attr_set(struct net_device *dev,
843 const struct switchdev_attr *attr,
844 struct switchdev_trans *trans)
846 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
850 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
851 err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
855 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
856 err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
858 attr->u.brport_flags);
860 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
861 err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
863 attr->u.brport_flags);
865 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
866 err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
867 attr->u.ageing_time);
869 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
870 err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
872 attr->u.vlan_filtering);
874 case SWITCHDEV_ATTR_ID_PORT_MROUTER:
875 err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
879 case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
880 err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
882 attr->u.mc_disabled);
884 case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
885 err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
/* Attribute changes can affect SPAN mirroring; recompute it on commit */
894 if (switchdev_trans_ph_commit(trans))
895 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
/* Bind a {port, VLAN} to its bridge's FID: get the FID from the bridge
 * ops, program UC/MC/BC flood membership per the bridge-port flags, then
 * map {port, vid} to the FID. Unwinds in reverse order on any failure
 * (goto-cleanup pattern).
 */
901 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
902 struct mlxsw_sp_bridge_port *bridge_port,
903 struct netlink_ext_ack *extack)
905 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
906 struct mlxsw_sp_bridge_device *bridge_device;
907 u8 local_port = mlxsw_sp_port->local_port;
908 u16 vid = mlxsw_sp_port_vlan->vid;
909 struct mlxsw_sp_fid *fid;
912 bridge_device = bridge_port->bridge_device;
913 fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
917 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
918 bridge_port->flags & BR_FLOOD);
920 goto err_fid_uc_flood_set;
922 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
923 mlxsw_sp_mc_flood(bridge_port));
925 goto err_fid_mc_flood_set;
927 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
930 goto err_fid_bc_flood_set;
932 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
934 goto err_fid_port_vid_map;
936 mlxsw_sp_port_vlan->fid = fid;
/* Error unwind: undo flood memberships in reverse, then drop the FID ref */
940 err_fid_port_vid_map:
941 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
942 err_fid_bc_flood_set:
943 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
944 err_fid_mc_flood_set:
945 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
946 err_fid_uc_flood_set:
947 mlxsw_sp_fid_put(fid);
/* Inverse of fid_join: unmap {port, vid} from the FID, clear BC/MC/UC
 * flood membership, and release the FID reference.
 */
952 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
954 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
955 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
956 u8 local_port = mlxsw_sp_port->local_port;
957 u16 vid = mlxsw_sp_port_vlan->vid;
959 mlxsw_sp_port_vlan->fid = NULL;
960 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
961 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
962 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
963 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
964 mlxsw_sp_fid_put(fid);
/* Compute the PVID a port should have after a VLAN change: @vid when it
 * becomes the PVID, 0 (no untagged traffic) when the current PVID is being
 * removed, otherwise keep the existing PVID.
 */
968 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
969 u16 vid, bool is_pvid)
973 else if (mlxsw_sp_port->pvid == vid)
974 return 0; /* Dis-allow untagged packets */
976 return mlxsw_sp_port->pvid;
/* Attach a {port, VLAN} to a bridge port: join the FID, program learning
 * and STP state per the bridge port's current settings, link into the
 * bridge-VLAN, and take a bridge-port reference. Idempotent if already
 * attached (only VLAN flags changed). Goto-cleanup on failure.
 */
980 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
981 struct mlxsw_sp_bridge_port *bridge_port,
982 struct netlink_ext_ack *extack)
984 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
985 struct mlxsw_sp_bridge_vlan *bridge_vlan;
986 u16 vid = mlxsw_sp_port_vlan->vid;
989 /* No need to continue if only VLAN flags were changed */
990 if (mlxsw_sp_port_vlan->bridge_port)
993 err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
998 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
999 bridge_port->flags & BR_LEARNING);
1001 goto err_port_vid_learning_set;
1003 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
1004 bridge_port->stp_state);
1006 goto err_port_vid_stp_set;
1008 bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
1011 goto err_bridge_vlan_get;
1014 list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
1015 &bridge_vlan->port_vlan_list);
/* Hold the bridge port for as long as this port-VLAN points at it */
1017 mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
1019 mlxsw_sp_port_vlan->bridge_port = bridge_port;
1023 err_bridge_vlan_get:
1024 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1025 err_port_vid_stp_set:
1026 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1027 err_port_vid_learning_set:
1028 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
/* Detach a {port, VLAN} from its bridge port: unlink from the bridge-VLAN,
 * disable STP/learning for the VID, flush FDB (and, presumably when this
 * was the port's last VLAN, MDB — the guarding conditions are elided in
 * this view), leave the FID and drop the bridge-port reference.
 */
1033 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1035 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1036 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1037 struct mlxsw_sp_bridge_vlan *bridge_vlan;
1038 struct mlxsw_sp_bridge_port *bridge_port;
1039 u16 vid = mlxsw_sp_port_vlan->vid;
1040 bool last_port, last_vlan;
1042 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
1043 mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
1046 bridge_port = mlxsw_sp_port_vlan->bridge_port;
1047 last_vlan = list_is_singular(&bridge_port->vlans_list);
1048 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
1049 last_port = list_is_singular(&bridge_vlan->port_vlan_list);
1051 list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
1052 mlxsw_sp_bridge_vlan_put(bridge_vlan);
1053 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1054 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1056 mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
1058 mlxsw_sp_fid_index(fid));
1060 mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
1062 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1064 mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
1065 mlxsw_sp_port_vlan->bridge_port = NULL;
/* Add (or update flags of) VLAN @vid on a bridge port: create the
 * port-VLAN if needed, program VLAN membership/untagged state, set the
 * PVID, then join the bridge. Rolls back PVID and VLAN membership on
 * failure.
 */
1069 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1070 struct mlxsw_sp_bridge_port *bridge_port,
1071 u16 vid, bool is_untagged, bool is_pvid,
1072 struct netlink_ext_ack *extack)
1074 u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1075 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1076 u16 old_pvid = mlxsw_sp_port->pvid;
1079 /* The only valid scenario in which a port-vlan already exists, is if
1080 * the VLAN flags were changed and the port-vlan is associated with the
1081 * correct bridge port
1083 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1084 if (mlxsw_sp_port_vlan &&
1085 mlxsw_sp_port_vlan->bridge_port != bridge_port)
1088 if (!mlxsw_sp_port_vlan) {
1089 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1091 if (IS_ERR(mlxsw_sp_port_vlan))
1092 return PTR_ERR(mlxsw_sp_port_vlan);
1095 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
1098 goto err_port_vlan_set;
1100 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1102 goto err_port_pvid_set;
1104 err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
1107 goto err_port_vlan_bridge_join;
/* Unwind: restore old PVID, remove VLAN membership, destroy port-VLAN */
1111 err_port_vlan_bridge_join:
1112 mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
1114 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1116 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
/* Reject a VLAN change on the bridge device itself when it would change or
 * remove the PVID that a router interface (RIF) on the bridge is using.
 */
1121 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1122 const struct net_device *br_dev,
1123 const struct switchdev_obj_port_vlan *vlan)
1125 struct mlxsw_sp_rif *rif;
1126 struct mlxsw_sp_fid *fid;
1130 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
1133 fid = mlxsw_sp_rif_fid(rif);
1134 pvid = mlxsw_sp_fid_8021q_vid(fid);
1136 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1137 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1139 netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1144 netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
/* switchdev SWITCHDEV_OBJ_ID_PORT_VLAN add handler: VLANs on the bridge
 * master itself are only validated against RIF PVID use; for bridge ports,
 * each VID in the range is added in the commit phase (VLAN-unaware bridges
 * are a no-op).
 */
1153 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1154 const struct switchdev_obj_port_vlan *vlan,
1155 struct switchdev_trans *trans,
1156 struct netlink_ext_ack *extack)
1158 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1159 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1160 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1161 struct net_device *orig_dev = vlan->obj.orig_dev;
1162 struct mlxsw_sp_bridge_port *bridge_port;
1165 if (netif_is_bridge_master(orig_dev)) {
1168 if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
1169 br_vlan_enabled(orig_dev) &&
1170 switchdev_trans_ph_prepare(trans))
1171 err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
1178 if (switchdev_trans_ph_commit(trans))
1181 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1182 if (WARN_ON(!bridge_port))
1185 if (!bridge_port->bridge_device->vlan_enabled)
1188 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1191 err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
/* Choose the SFDF flush scope: per-LAG+FID for LAG ports, else
 * per-port+FID.
 */
1201 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1203 return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1204 MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
/* Flush all FDB entries learned on @bridge_port in FID @fid_index via the
 * SFDF register, addressing by LAG id or system port as appropriate.
 */
1208 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1209 struct mlxsw_sp_bridge_port *bridge_port,
1212 bool lagged = bridge_port->lagged;
1213 char sfdf_pl[MLXSW_REG_SFDF_LEN];
1216 system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1217 mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1218 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1219 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1221 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
/* Map the "dynamic" flag to an SFD record policy. NOTE(review): both arms
 * are DYNAMIC_ENTRY variants (ingress vs MLAG) — confirm intent against
 * the register documentation.
 */
1224 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1226 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1227 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
/* Map add/remove to the corresponding SFD write operation. */
1230 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1232 return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1233 MLXSW_REG_SFD_OP_WRITE_REMOVE;
/* Add/remove a unicast FDB record pointing at a tunnel (NVE) underlay IP.
 * Only IPv4 underlay is handled; IPv6 falls through (unsupported in this
 * view). The number of records echoed back by the device is compared to
 * what was written to detect a partial write.
 */
1236 static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1237 const char *mac, u16 fid,
1238 enum mlxsw_sp_l3proto proto,
1239 const union mlxsw_sp_l3addr *addr,
1240 bool adding, bool dynamic)
1242 enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
1249 case MLXSW_SP_L3_PROTO_IPV4:
1250 uip = be32_to_cpu(addr->addr4);
1251 sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
1253 case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
1259 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1263 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1264 mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
1265 mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
1266 MLXSW_REG_SFD_REC_ACTION_NOP, uip,
1268 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1269 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
/* Device rewrote the record count: mismatch means not all records took */
1273 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
/* Core unicast FDB write: pack a single SFD unicast record with the given
 * action/policy for {mac, fid, local_port} and write it, verifying the
 * device consumed the record.
 */
1281 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1282 const char *mac, u16 fid, bool adding,
1283 enum mlxsw_reg_sfd_rec_action action,
1284 enum mlxsw_reg_sfd_rec_policy policy)
1290 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1294 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1295 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1296 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1297 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1301 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
/* Convenience wrapper: plain (NOP-action) unicast FDB add/remove with the
 * dynamic/static policy derived from @dynamic.
 */
1309 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1310 const char *mac, u16 fid, bool adding,
1313 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1314 MLXSW_REG_SFD_REC_ACTION_NOP,
1315 mlxsw_sp_sfd_rec_policy(dynamic));
/* Install/remove a static FDB entry that forwards to the router (used for
 * RIF MAC addresses); local_port 0 is used since the router consumes it.
 */
1318 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1321 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1322 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1323 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
/* Write or remove a unicast FDB record whose destination is a LAG
 * (identified by @lag_id) rather than a single local port. @lag_vid is
 * only meaningful for FIDs where a LAG VID is valid (see callers).
 * Error/return lines are elided from this listing.
 */
1326 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1327 const char *mac, u16 fid, u16 lag_vid,
1328 bool adding, bool dynamic)
1334 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1338 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1339 mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1340 mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
/* num_rec echo check, same pattern as the non-LAG helper. */
1342 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1343 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1347 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
/* Handle a switchdev FDB add/del notification for @mlxsw_sp_port:
 * resolve the bridge port and {port, bridge} VLAN to obtain the FID
 * index, then program the entry either per-port or per-LAG depending
 * on how the bridge port is realized. The final bool argument selects
 * a static entry (false == not dynamic) — presumably because
 * bridge-originated entries must not age out in HW; TODO confirm.
 */
1356 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1357 struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1359 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1360 struct net_device *orig_dev = fdb_info->info.dev;
1361 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1362 struct mlxsw_sp_bridge_device *bridge_device;
1363 struct mlxsw_sp_bridge_port *bridge_port;
1366 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1370 bridge_device = bridge_port->bridge_device;
1371 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1374 if (!mlxsw_sp_port_vlan)
1377 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1378 vid = mlxsw_sp_port_vlan->vid;
/* Non-LAG bridge port: program directly on the system port. */
1380 if (!bridge_port->lagged)
1381 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1382 bridge_port->system_port,
1383 fdb_info->addr, fid_index,
1386 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1387 bridge_port->lag_id,
1388 fdb_info->addr, fid_index,
1389 vid, adding, false);
/* Write or remove a multicast FDB record: {MAC, FID} -> MID index.
 * The MID (multicast ID) selects an SMID flooding entry programmed
 * elsewhere. Error/return lines elided from this listing.
 */
1392 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1393 u16 fid, u16 mid_idx, bool adding)
1399 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1403 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1404 mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1405 MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
/* num_rec echo check, same pattern as the unicast helpers. */
1406 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1407 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1411 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
/* Fully (re)program SMID entry @mid_idx: enable the port-mask bit for
 * every existing port (so the subsequent per-port membership bits take
 * effect), then set membership from @ports_bitmap and optionally add
 * the router port when @set_router_port is true.
 */
1419 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
1421 bool set_router_port)
1426 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1430 mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
/* Mark all existing local ports as "valid" in the port mask. */
1431 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1432 if (mlxsw_sp->ports[i])
1433 mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
1436 mlxsw_reg_smid_port_mask_set(smid_pl,
1437 mlxsw_sp_router_port(mlxsw_sp), 1);
/* Membership: flood to each port set in the caller's bitmap. */
1439 for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
1440 mlxsw_reg_smid_port_set(smid_pl, i, 1);
1442 mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
1445 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
/* Add or remove a single local port in SMID entry @mid_idx, leaving
 * the rest of the entry untouched.
 */
1450 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1451 u16 mid_idx, bool add)
1453 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1457 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1461 mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1462 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
/* Look up a multicast group entry on the bridge device by
 * {MAC address, FID}. Linear scan of the bridge's mids_list; the
 * found/NULL return lines are elided from this listing.
 */
1468 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1469 const unsigned char *addr,
1472 struct mlxsw_sp_mid *mid;
1474 list_for_each_entry(mid, &bridge_device->mids_list, list) {
1475 if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
/* Record in @ports_bitmap the local port(s) backing @bridge_port:
 * the single system port for a plain port, or every current member of
 * the LAG for a lagged bridge port.
 */
1482 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1483 struct mlxsw_sp_bridge_port *bridge_port,
1484 unsigned long *ports_bitmap)
1486 struct mlxsw_sp_port *mlxsw_sp_port;
1487 u64 max_lag_members, i;
1490 if (!bridge_port->lagged) {
1491 set_bit(bridge_port->system_port, ports_bitmap);
/* Lagged: walk all possible LAG member slots and collect the
 * ports that are actually present.
 */
1493 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1495 lag_id = bridge_port->lag_id;
1496 for (i = 0; i < max_lag_members; i++) {
1497 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1500 set_bit(mlxsw_sp_port->local_port,
/* OR into @flood_bitmap the ports of every bridge port marked as a
 * multicast router; mrouter ports must receive all multicast traffic
 * regardless of group membership.
 */
1507 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1508 struct mlxsw_sp_bridge_device *bridge_device,
1509 struct mlxsw_sp *mlxsw_sp)
1511 struct mlxsw_sp_bridge_port *bridge_port;
1513 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1514 if (bridge_port->mrouter) {
1515 mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
/* Commit @mid to hardware: allocate a free MID index, build the flood
 * bitmap (group members plus all mrouter ports), program the SMID
 * flooding entry and the SFD multicast record, then reserve the index
 * in mids_bitmap. Return type/value lines are elided from this
 * listing — callers treat a falsy result as failure (see
 * __mlxsw_sp_mc_alloc); TODO confirm exact return convention.
 */
1523 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1524 struct mlxsw_sp_mid *mid,
1525 struct mlxsw_sp_bridge_device *bridge_device)
1533 mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1535 if (mid_idx == MLXSW_SP_MID_MAX)
1538 num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1539 alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
1540 flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
/* Flood set = explicit group members + multicast-router ports. */
1544 bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
1545 mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1548 err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
1549 bridge_device->mrouter);
1550 kfree(flood_bitmap);
1554 err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
/* Only mark the index as in-use once HW programming succeeded. */
1559 set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
/* Undo mlxsw_sp_mc_write_mdb_entry(): release the MID index and remove
 * the multicast SFD record from hardware.
 */
1564 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1565 struct mlxsw_sp_mid *mid)
1570 clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1572 return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
/* Allocate and initialize a new multicast group (MID) object for
 * {addr, fid} on @bridge_device and link it into the bridge's
 * mids_list. The HW entry is only written when multicast is enabled on
 * the bridge; otherwise programming is deferred (see
 * mlxsw_sp_bridge_mdb_mc_enable_sync). Returns the new mid, or NULL on
 * failure (return lines elided from this listing).
 */
1577 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1578 struct mlxsw_sp_bridge_device *bridge_device,
1579 const unsigned char *addr,
1582 struct mlxsw_sp_mid *mid;
1585 mid = kzalloc(sizeof(*mid), GFP_KERNEL);
/* Per-port membership bitmap sized for all possible local ports. */
1589 alloc_size = sizeof(unsigned long) *
1590 BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
1592 mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
1593 if (!mid->ports_in_mid)
1594 goto err_ports_in_mid_alloc;
1596 ether_addr_copy(mid->addr, addr);
1600 if (!bridge_device->multicast_enabled)
1603 if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1604 goto err_write_mdb_entry;
1607 list_add_tail(&mid->list, &bridge_device->mids_list);
/* Error unwind: free in reverse order of allocation. */
1610 err_write_mdb_entry:
1611 kfree(mid->ports_in_mid);
1612 err_ports_in_mid_alloc:
/* Drop @mlxsw_sp_port from multicast group @mid; when the last member
 * leaves, tear down the HW entry and free the group object entirely.
 */
1617 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1618 struct mlxsw_sp_mid *mid)
1620 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1623 clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1624 if (bitmap_empty(mid->ports_in_mid,
1625 mlxsw_core_max_ports(mlxsw_sp->core))) {
1626 err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1627 list_del(&mid->list);
1628 kfree(mid->ports_in_mid);
/* switchdev SWITCHDEV_OBJ_ID_PORT_MDB add handler. Look up (or create)
 * the MID for the group's {MAC, FID}, mark this port as a member, and
 * program the port into the SMID entry — skipped when bridge multicast
 * snooping is disabled (all MC is flooded then) or when the port is an
 * mrouter (it already receives all multicast).
 */
1634 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1635 const struct switchdev_obj_port_mdb *mdb,
1636 struct switchdev_trans *trans)
1638 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1639 struct net_device *orig_dev = mdb->obj.orig_dev;
1640 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1641 struct net_device *dev = mlxsw_sp_port->dev;
1642 struct mlxsw_sp_bridge_device *bridge_device;
1643 struct mlxsw_sp_bridge_port *bridge_port;
1644 struct mlxsw_sp_mid *mid;
/* Two-phase switchdev transaction: nothing to do in prepare. */
1648 if (switchdev_trans_ph_prepare(trans))
1651 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1655 bridge_device = bridge_port->bridge_device;
1656 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1659 if (!mlxsw_sp_port_vlan)
1662 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1664 mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1666 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
1669 netdev_err(dev, "Unable to allocate MC group\n");
1673 set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1675 if (!bridge_device->multicast_enabled)
1678 if (bridge_port->mrouter)
1681 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
1683 netdev_err(dev, "Unable to set SMID\n");
/* Error path: undo the membership taken above. */
1690 mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
/* Resync all MDB entries of @bridge_device with the bridge's current
 * multicast-snooping state: write them to HW when snooping was just
 * enabled, remove them when it was disabled.
 */
1695 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1696 struct mlxsw_sp_bridge_device
1699 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1700 struct mlxsw_sp_mid *mid;
1703 mc_enabled = bridge_device->multicast_enabled;
1705 list_for_each_entry(mid, &bridge_device->mids_list, list) {
1707 mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1710 mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
/* When a port's mrouter flag changes, add it to (or remove it from)
 * the SMID entry of every group it is not already an explicit member
 * of — explicit members keep their flooding regardless.
 */
1715 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1716 struct mlxsw_sp_bridge_port *bridge_port,
1719 struct mlxsw_sp_bridge_device *bridge_device;
1720 struct mlxsw_sp_mid *mid;
1722 bridge_device = bridge_port->bridge_device;
1724 list_for_each_entry(mid, &bridge_device->mids_list, list) {
1725 if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1726 mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
/* Deferred-work context for re-evaluating SPAN (mirroring) state;
 * freed by the worker after it runs (free line elided from listing).
 */
1730 struct mlxsw_sp_span_respin_work {
1731 struct work_struct work;
1732 struct mlxsw_sp *mlxsw_sp;
/* Workqueue callback: re-evaluate SPAN entries against the (possibly
 * changed) bridge state. NOTE(review): locking and the kfree of
 * respin_work are elided from this listing — confirm in full source.
 */
1735 static void mlxsw_sp_span_respin_work(struct work_struct *work)
1737 struct mlxsw_sp_span_respin_work *respin_work =
1738 container_of(work, struct mlxsw_sp_span_respin_work, work);
1741 mlxsw_sp_span_respin(respin_work->mlxsw_sp);
/* Schedule a SPAN respin from (potentially) atomic context, hence
 * GFP_ATOMIC. NOTE(review): the NULL check of the allocation is not
 * visible in this listing — confirm respin_work is checked before use.
 */
1746 static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
1748 struct mlxsw_sp_span_respin_work *respin_work;
1750 respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
1754 INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
1755 respin_work->mlxsw_sp = mlxsw_sp;
1757 mlxsw_core_schedule_work(&respin_work->work);
/* switchdev object-add entry point: dispatch PORT_VLAN and PORT_MDB
 * additions to their handlers.
 */
1760 static int mlxsw_sp_port_obj_add(struct net_device *dev,
1761 const struct switchdev_obj *obj,
1762 struct switchdev_trans *trans,
1763 struct netlink_ext_ack *extack)
1765 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1766 const struct switchdev_obj_port_vlan *vlan;
1770 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1771 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
1772 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
1775 if (switchdev_trans_ph_prepare(trans)) {
1776 /* The event is emitted before the changes are actually
1777 * applied to the bridge. Therefore schedule the respin
1778 * call for later, so that the respin logic sees the
1779 * updated bridge state.
1781 mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1784 case SWITCHDEV_OBJ_ID_PORT_MDB:
1785 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1786 SWITCHDEV_OBJ_PORT_MDB(obj),
/* Remove VLAN @vid from a bridge port: detach the {port, VLAN} from
 * the bridge, clear the PVID if it was this VLAN, drop the VLAN from
 * the port's filter and destroy the port-VLAN object.
 */
1798 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1799 struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
/* If the deleted VLAN is the current PVID, reset PVID to 0. */
1801 u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
1802 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1804 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1805 if (WARN_ON(!mlxsw_sp_port_vlan))
1808 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1809 mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1810 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1811 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
/* switchdev PORT_VLAN delete handler: ignore events on the bridge
 * master itself and on VLAN-unaware bridges, then delete every VID in
 * the [vid_begin, vid_end] range from the bridge port.
 */
1814 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1815 const struct switchdev_obj_port_vlan *vlan)
1817 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1818 struct net_device *orig_dev = vlan->obj.orig_dev;
1819 struct mlxsw_sp_bridge_port *bridge_port;
1822 if (netif_is_bridge_master(orig_dev))
1825 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1826 if (WARN_ON(!bridge_port))
1829 if (!bridge_port->bridge_device->vlan_enabled)
1832 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
1833 mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
/* Remove this port from multicast group @mid: clear the SMID bit
 * (unless snooping is off or the port is an mrouter — in those cases
 * the bit was never set by mdb_add), then drop MID membership. Errors
 * are logged but do not abort the teardown.
 */
1839 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1840 struct mlxsw_sp_bridge_port *bridge_port,
1841 struct mlxsw_sp_mid *mid)
1843 struct net_device *dev = mlxsw_sp_port->dev;
1846 if (bridge_port->bridge_device->multicast_enabled &&
1847 !bridge_port->mrouter) {
1848 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1850 netdev_err(dev, "Unable to remove port from SMID\n");
1853 err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1855 netdev_err(dev, "Unable to remove MC SFD\n");
/* switchdev PORT_MDB delete handler: resolve the bridge port, the
 * {port, bridge} VLAN and its FID, locate the MID for the group MAC
 * and remove this port from it.
 */
1860 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1861 const struct switchdev_obj_port_mdb *mdb)
1863 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1864 struct net_device *orig_dev = mdb->obj.orig_dev;
1865 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1866 struct mlxsw_sp_bridge_device *bridge_device;
1867 struct net_device *dev = mlxsw_sp_port->dev;
1868 struct mlxsw_sp_bridge_port *bridge_port;
1869 struct mlxsw_sp_mid *mid;
1872 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1876 bridge_device = bridge_port->bridge_device;
1877 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1880 if (!mlxsw_sp_port_vlan)
1883 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1885 mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1887 netdev_err(dev, "Unable to remove port from MC DB\n");
1891 return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
/* On bridge-port teardown, walk every MDB entry of the bridge:
 * remove the port from groups it is a member of; for groups it only
 * receives via the mrouter flag, just clear its SMID bit. _safe
 * iteration because removal of the last member frees the mid.
 */
1895 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1896 struct mlxsw_sp_bridge_port *bridge_port)
1898 struct mlxsw_sp_bridge_device *bridge_device;
1899 struct mlxsw_sp_mid *mid, *tmp;
1901 bridge_device = bridge_port->bridge_device;
1903 list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
1904 if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
1905 __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
1907 } else if (bridge_device->multicast_enabled &&
1908 bridge_port->mrouter) {
1909 mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
/* switchdev object-delete entry point: dispatch PORT_VLAN / PORT_MDB
 * deletions, then schedule a SPAN respin since bridge topology may
 * have changed.
 */
1914 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1915 const struct switchdev_obj *obj)
1917 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1921 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1922 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1923 SWITCHDEV_OBJ_PORT_VLAN(obj));
1925 case SWITCHDEV_OBJ_ID_PORT_MDB:
1926 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1927 SWITCHDEV_OBJ_PORT_MDB(obj));
1934 mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
/* Return the first existing member port of @lag_id to act as the
 * LAG's representor, or NULL when the LAG has no members (NULL return
 * line elided from this listing).
 */
1939 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1942 struct mlxsw_sp_port *mlxsw_sp_port;
1943 u64 max_lag_members;
1946 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1948 for (i = 0; i < max_lag_members; i++) {
1949 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1951 return mlxsw_sp_port;
/* switchdev attribute get/set callbacks registered on each port netdev. */
1956 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
1957 .switchdev_port_attr_get = mlxsw_sp_port_attr_get,
1958 .switchdev_port_attr_set = mlxsw_sp_port_attr_set,
/* 802.1Q (VLAN-aware) bridge: port join. VLAN uppers cannot be
 * enslaved to a VLAN-aware bridge; the port's default VLAN stops
 * being usable as a router interface.
 */
1962 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1963 struct mlxsw_sp_bridge_port *bridge_port,
1964 struct mlxsw_sp_port *mlxsw_sp_port,
1965 struct netlink_ext_ack *extack)
1967 if (is_vlan_dev(bridge_port->dev)) {
1968 NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
1972 /* Port is no longer usable as a router interface */
1973 if (mlxsw_sp_port->default_vlan->fid)
1974 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
/* 802.1Q bridge: port leave — restore the default PVID so untagged
 * traffic can still ingress once the port is standalone.
 */
1980 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1981 struct mlxsw_sp_bridge_port *bridge_port,
1982 struct mlxsw_sp_port *mlxsw_sp_port)
1984 /* Make sure untagged frames are allowed to ingress */
1985 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
/* 802.1Q bridge: bind a VxLAN device's VNI to the FID of @vid.
 * NOTE(review): "¶ms" on line 2026 below is a mangled "&params" in
 * this listing (HTML-entity corruption) — the real source passes the
 * address of the nve_params struct.
 */
1989 mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
1990 const struct net_device *vxlan_dev, u16 vid,
1991 struct netlink_ext_ack *extack)
1993 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
1994 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
1995 struct mlxsw_sp_nve_params params = {
1996 .type = MLXSW_SP_NVE_TYPE_VXLAN,
1997 .vni = vxlan->cfg.vni,
2000 struct mlxsw_sp_fid *fid;
2003 /* If the VLAN is 0, we need to find the VLAN that is configured as
2004 * PVID and egress untagged on the bridge port of the VxLAN device.
2005 * It is possible no such VLAN exists
2008 err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
2013 /* If no other port is member in the VLAN, then the FID does not exist.
2014 * NVE will be enabled on the FID once a port joins the VLAN
2016 fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
/* A FID can carry at most one VNI. */
2020 if (mlxsw_sp_fid_vni_is_set(fid)) {
2021 NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2023 goto err_vni_exists;
2026 err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, ¶ms, extack);
2028 goto err_nve_fid_enable;
2030 /* The tunnel port does not hold a reference on the FID. Only
2031 * local ports and the router port
2033 mlxsw_sp_fid_put(fid);
/* Error path: drop the lookup reference taken above. */
2039 mlxsw_sp_fid_put(fid);
/* Find the VxLAN lower device of @br_dev whose bridge-port VLAN
 * mapping resolves to @vid; returns NULL when none matches (return
 * lines elided from this listing).
 */
2043 static struct net_device *
2044 mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2046 struct net_device *dev;
2047 struct list_head *iter;
2049 netdev_for_each_lower_dev(br_dev, dev, iter) {
2053 if (!netif_is_vxlan(dev))
2056 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2057 if (err || pvid != vid)
/* 802.1Q bridge: get (create-or-ref) the FID for @vid. If a running
 * VxLAN device maps to this VLAN and the FID has no VNI yet, also
 * bind the VNI now — a port joining the VLAN may be what makes NVE
 * possible.
 */
2066 static struct mlxsw_sp_fid *
2067 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2068 u16 vid, struct netlink_ext_ack *extack)
2070 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2071 struct net_device *vxlan_dev;
2072 struct mlxsw_sp_fid *fid;
2075 fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2079 if (mlxsw_sp_fid_vni_is_set(fid))
2082 /* Find the VxLAN device that has the specified VLAN configured as
2083 * PVID and egress untagged. There can be at most one such device
2085 vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev,
2090 if (!netif_running(vxlan_dev))
2093 err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
2096 goto err_vxlan_join;
/* Error path: release the FID reference taken by _get above. */
2101 mlxsw_sp_fid_put(fid);
2102 return ERR_PTR(err);
/* 802.1Q bridge: FID lookup is simply a per-VID 802.1Q FID lookup. */
2105 static struct mlxsw_sp_fid *
2106 mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2109 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2111 return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
/* 802.1Q bridge: a FID maps back to its VLAN ID. */
2115 mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2116 const struct mlxsw_sp_fid *fid)
2118 return mlxsw_sp_fid_8021q_vid(fid);
/* Ops vtable for VLAN-aware (802.1Q) bridges; see struct
 * mlxsw_sp_bridge_ops at the top of the file.
 */
2121 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
2122 .port_join = mlxsw_sp_bridge_8021q_port_join,
2123 .port_leave = mlxsw_sp_bridge_8021q_port_leave,
2124 .vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join,
2125 .fid_get = mlxsw_sp_bridge_8021q_fid_get,
2126 .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
2127 .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
/* Check whether any {port, VLAN} of @mlxsw_sp_port is already enslaved
 * to bridge @br_dev (used to reject bridging two VLAN uppers of the
 * same physical port to one bridge).
 */
2131 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2132 const struct net_device *br_dev)
2134 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2136 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2138 if (mlxsw_sp_port_vlan->bridge_port &&
2139 mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
/* 802.1D (VLAN-unaware) bridge: port join. The enslaved device may be
 * the port itself or one of its VLAN uppers; resolve the VID
 * accordingly, reject a second upper of the same port, detach the
 * {port, VLAN} from the router and attach it to the bridge.
 */
2148 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2149 struct mlxsw_sp_bridge_port *bridge_port,
2150 struct mlxsw_sp_port *mlxsw_sp_port,
2151 struct netlink_ext_ack *extack)
2153 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2154 struct net_device *dev = bridge_port->dev;
2157 vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2158 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2159 if (WARN_ON(!mlxsw_sp_port_vlan))
2162 if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
2163 NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
2167 /* Port is no longer usable as a router interface */
2168 if (mlxsw_sp_port_vlan->fid)
2169 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
2171 return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
/* 802.1D bridge: port leave — detach the {port, VLAN} that joined in
 * the corresponding port_join, if it is still bridged.
 */
2176 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2177 struct mlxsw_sp_bridge_port *bridge_port,
2178 struct mlxsw_sp_port *mlxsw_sp_port)
2180 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2181 struct net_device *dev = bridge_port->dev;
2184 vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2185 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2186 if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
2189 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
/* 802.1D bridge: bind a VxLAN device's VNI to the bridge's 802.1D FID
 * (keyed by bridge ifindex; @vid is unused for VLAN-unaware bridges).
 * NOTE(review): "¶ms" on line 2219 below is a mangled "&params" in
 * this listing (HTML-entity corruption).
 */
2193 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2194 const struct net_device *vxlan_dev, u16 vid,
2195 struct netlink_ext_ack *extack)
2197 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2198 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2199 struct mlxsw_sp_nve_params params = {
2200 .type = MLXSW_SP_NVE_TYPE_VXLAN,
2201 .vni = vxlan->cfg.vni,
2204 struct mlxsw_sp_fid *fid;
2207 fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2209 NL_SET_ERR_MSG_MOD(extack, "Did not find a corresponding FID");
/* A FID can carry at most one VNI. */
2213 if (mlxsw_sp_fid_vni_is_set(fid)) {
2214 NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2216 goto err_vni_exists;
2219 err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, ¶ms, extack);
2221 goto err_nve_fid_enable;
2223 /* The tunnel port does not hold a reference on the FID. Only
2224 * local ports and the router port
2226 mlxsw_sp_fid_put(fid);
/* Error path: drop the lookup reference taken above. */
2232 mlxsw_sp_fid_put(fid);
/* 802.1D bridge: get (create-or-ref) the bridge's FID and, if a
 * running VxLAN lower exists and no VNI is bound yet, bind it now.
 */
2236 static struct mlxsw_sp_fid *
2237 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2238 u16 vid, struct netlink_ext_ack *extack)
2240 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2241 struct net_device *vxlan_dev;
2242 struct mlxsw_sp_fid *fid;
2245 fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2249 if (mlxsw_sp_fid_vni_is_set(fid))
2252 vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
2256 if (!netif_running(vxlan_dev))
/* vid is irrelevant on a VLAN-unaware bridge, hence 0. */
2259 err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0,
2262 goto err_vxlan_join;
/* Error path: release the FID reference taken by _get above. */
2267 mlxsw_sp_fid_put(fid);
2268 return ERR_PTR(err);
/* 802.1D bridge: lookup the bridge's FID by ifindex; only VLAN 0 is
 * meaningful for a VLAN-unaware bridge.
 */
2271 static struct mlxsw_sp_fid *
2272 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2275 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2277 /* The only valid VLAN for a VLAN-unaware bridge is 0 */
2281 return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
/* 802.1D bridge: FIDs have no VLAN; body (elided here) presumably
 * returns 0 — TODO confirm in full source.
 */
2285 mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2286 const struct mlxsw_sp_fid *fid)
/* Ops vtable for VLAN-unaware (802.1D) bridges. */
2291 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
2292 .port_join = mlxsw_sp_bridge_8021d_port_join,
2293 .port_leave = mlxsw_sp_bridge_8021d_port_leave,
2294 .vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join,
2295 .fid_get = mlxsw_sp_bridge_8021d_fid_get,
2296 .fid_lookup = mlxsw_sp_bridge_8021d_fid_lookup,
2297 .fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
/* Public API: enslave @brport_dev (backed by @mlxsw_sp_port) to bridge
 * @br_dev. Takes a reference on the bridge-port object and delegates
 * to the per-bridge-type port_join op; the reference is dropped on
 * failure.
 */
2300 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2301 struct net_device *brport_dev,
2302 struct net_device *br_dev,
2303 struct netlink_ext_ack *extack)
2305 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2306 struct mlxsw_sp_bridge_device *bridge_device;
2307 struct mlxsw_sp_bridge_port *bridge_port;
2310 bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
2311 if (IS_ERR(bridge_port))
2312 return PTR_ERR(bridge_port);
2313 bridge_device = bridge_port->bridge_device;
2315 err = bridge_device->ops->port_join(bridge_device, bridge_port,
2316 mlxsw_sp_port, extack);
/* Error path: drop the bridge-port reference taken above. */
2323 mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
/* Public API: reverse of mlxsw_sp_port_bridge_join — run the
 * per-bridge-type port_leave op and drop the bridge-port reference.
 */
2327 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2328 struct net_device *brport_dev,
2329 struct net_device *br_dev)
2331 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2332 struct mlxsw_sp_bridge_device *bridge_device;
2333 struct mlxsw_sp_bridge_port *bridge_port;
2335 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2338 bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2342 bridge_device->ops->port_leave(bridge_device, bridge_port,
2344 mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
/* Public API: dispatch a VxLAN join to the per-bridge-type vxlan_join
 * op (802.1Q uses @vid, 802.1D ignores it).
 */
2347 int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
2348 const struct net_device *br_dev,
2349 const struct net_device *vxlan_dev, u16 vid,
2350 struct netlink_ext_ack *extack)
2352 struct mlxsw_sp_bridge_device *bridge_device;
2354 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2355 if (WARN_ON(!bridge_device))
2358 return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
/* Public API: unbind a VxLAN device's VNI from its FID. Looking the
 * FID up by VNI is sufficient: if the VxLAN device is down the FID has
 * no VNI and there is nothing to do.
 */
2362 void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
2363 const struct net_device *vxlan_dev)
2365 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2366 struct mlxsw_sp_fid *fid;
2368 /* If the VxLAN device is down, then the FID does not have a VNI */
2369 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
2373 mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
2374 mlxsw_sp_fid_put(fid);
/* Public API: get a referenced FID for {bridge, vid} via the bridge's
 * type-specific fid_get op.
 */
2377 struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
2378 const struct net_device *br_dev,
2380 struct netlink_ext_ack *extack)
2382 struct mlxsw_sp_bridge_device *bridge_device;
2384 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2385 if (WARN_ON(!bridge_device))
2386 return ERR_PTR(-EINVAL);
2388 return bridge_device->ops->fid_get(bridge_device, vid, extack);
/* Convert a vxlan union address into the driver's l3proto/l3addr
 * representation (IPv4 or IPv6 based on sa_family).
 */
2392 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2393 enum mlxsw_sp_l3proto *proto,
2394 union mlxsw_sp_l3addr *addr)
2396 if (vxlan_addr->sa.sa_family == AF_INET) {
2397 addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2398 *proto = MLXSW_SP_L3_PROTO_IPV4;
2400 addr->addr6 = vxlan_addr->sin6.sin6_addr;
2401 *proto = MLXSW_SP_L3_PROTO_IPV6;
/* Inverse of mlxsw_sp_switchdev_vxlan_addr_convert: fill a vxlan union
 * address from the driver's proto/addr pair.
 */
2406 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2407 const union mlxsw_sp_l3addr *addr,
2408 union vxlan_addr *vxlan_addr)
2411 case MLXSW_SP_L3_PROTO_IPV4:
2412 vxlan_addr->sa.sa_family = AF_INET;
2413 vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2415 case MLXSW_SP_L3_PROTO_IPV6:
2416 vxlan_addr->sa.sa_family = AF_INET6;
2417 vxlan_addr->sin6.sin6_addr = addr->addr6;
/* Notify the VxLAN core (via switchdev notifiers) about a HW-learned
 * or aged-out tunnel FDB entry so the software FDB stays in sync.
 */
2422 static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
2424 enum mlxsw_sp_l3proto proto,
2425 union mlxsw_sp_l3addr *addr,
2426 __be32 vni, bool adding)
2428 struct switchdev_notifier_vxlan_fdb_info info;
2429 struct vxlan_dev *vxlan = netdev_priv(dev);
2430 enum switchdev_notifier_type type;
2432 type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
2433 SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
2434 mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
2435 info.remote_port = vxlan->cfg.dst_port;
2436 info.remote_vni = vni;
2437 info.remote_ifindex = 0;
2438 ether_addr_copy(info.eth_addr, mac);
2440 info.offloaded = adding;
2441 call_switchdev_notifiers(type, dev, &info.info, NULL);
/* Dispatch an NVE FDB notification to the tunnel-type-specific helper;
 * only VxLAN is handled.
 */
2444 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2446 enum mlxsw_sp_l3proto proto,
2447 union mlxsw_sp_l3addr *addr,
2451 if (netif_is_vxlan(dev))
2452 mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
/* Emit a switchdev FDB notification (add/del to bridge) for a
 * HW-learned or aged-out {mac, vid} on @dev.
 */
2457 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2458 const char *mac, u16 vid,
2459 struct net_device *dev, bool offloaded)
2461 struct switchdev_notifier_fdb_info info;
2465 info.offloaded = offloaded;
2466 call_switchdev_notifiers(type, dev, &info.info, NULL);
/* Process a learned/aged-out MAC record from an SFN (switch FDB
 * notification) event for a non-LAG port: confirm the entry in HW
 * (static, see mlxsw_sp_port_fdb_set rationale) and notify the bridge.
 * On lookup failures the entry is removed from HW instead, otherwise
 * the device would keep re-sending the notification (error-path lines
 * elided from this listing).
 */
2469 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
2470 char *sfn_pl, int rec_index,
2473 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2474 struct mlxsw_sp_bridge_device *bridge_device;
2475 struct mlxsw_sp_bridge_port *bridge_port;
2476 struct mlxsw_sp_port *mlxsw_sp_port;
2477 enum switchdev_notifier_type type;
2481 bool do_notification = true;
2484 mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
2485 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2486 if (!mlxsw_sp_port) {
2487 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
2491 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2492 if (!mlxsw_sp_port_vlan) {
2493 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2497 bridge_port = mlxsw_sp_port_vlan->bridge_port;
2499 netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2503 bridge_device = bridge_port->bridge_device;
/* VLAN-unaware bridges report VID 0 to the software bridge. */
2504 vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2507 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
2510 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2514 if (!do_notification)
2516 type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2517 mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2523 do_notification = false;
/* LAG variant of mlxsw_sp_fdb_notify_mac_process: the SFN record
 * carries a lag_id; resolve a representor member port, confirm the
 * entry as a LAG FDB record (with a LAG VID only where the FID
 * requires one) and notify the bridge.
 */
2527 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
2528 char *sfn_pl, int rec_index,
2531 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2532 struct mlxsw_sp_bridge_device *bridge_device;
2533 struct mlxsw_sp_bridge_port *bridge_port;
2534 struct mlxsw_sp_port *mlxsw_sp_port;
2535 enum switchdev_notifier_type type;
2540 bool do_notification = true;
2543 mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
2544 mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
2545 if (!mlxsw_sp_port) {
2546 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
2550 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2551 if (!mlxsw_sp_port_vlan) {
2552 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2556 bridge_port = mlxsw_sp_port_vlan->bridge_port;
2558 netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2562 bridge_device = bridge_port->bridge_device;
2563 vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2564 lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
2565 mlxsw_sp_port_vlan->vid : 0;
2568 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
2571 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2575 if (!do_notification)
2577 type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2578 mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2584 do_notification = false;
/* For a unicast-tunnel SFN record: resolve the NVE netdev, its bridge,
 * the VNI and the bridge-relative VID of @fid. Additions are only
 * accepted when both the bridge port and (for VxLAN) the tunnel have
 * learning enabled. Return-value lines elided from this listing.
 */
2589 __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2590 const struct mlxsw_sp_fid *fid,
2592 struct net_device **nve_dev,
2593 u16 *p_vid, __be32 *p_vni)
2595 struct mlxsw_sp_bridge_device *bridge_device;
2596 struct net_device *br_dev, *dev;
2600 err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
2604 err = mlxsw_sp_fid_vni(fid, p_vni);
/* NOTE(review): init_net only — NVE devices in other netns are
 * not resolved here.
 */
2608 dev = __dev_get_by_index(&init_net, nve_ifindex);
2613 if (!netif_running(dev))
2616 if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
2619 if (adding && netif_is_vxlan(dev)) {
2620 struct vxlan_dev *vxlan = netdev_priv(dev);
2622 if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
2626 br_dev = netdev_master_upper_dev_get(dev);
2630 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2634 *p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
/* Process a learned/aged-out unicast-tunnel SFN record: resolve the
 * FID and underlay IP, validate via the __ helper, confirm the entry
 * in HW, then notify both the NVE device (VxLAN) and the bridge. If
 * any step fails the HW entry is removed so the device stops
 * re-sending the same notification.
 */
2639 static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2644 enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
2645 enum switchdev_notifier_type type;
2646 struct net_device *nve_dev;
2647 union mlxsw_sp_l3addr addr;
2648 struct mlxsw_sp_fid *fid;
2655 mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
2658 fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
2660 goto err_fid_lookup;
/* Translate the raw underlay IP into proto/addr form. */
2662 err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
2663 (enum mlxsw_sp_l3proto) sfn_proto,
2666 goto err_ip_resolve;
2668 err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
2669 &nve_dev, &vid, &vni)&#59;
2671 goto err_fdb_process;
2673 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2674 (enum mlxsw_sp_l3proto) sfn_proto,
2675 &addr, adding, true);
2679 mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
2680 (enum mlxsw_sp_l3proto) sfn_proto,
2681 &addr, vni, adding);
2683 type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
2684 SWITCHDEV_FDB_DEL_TO_BRIDGE;
2685 mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
2687 mlxsw_sp_fid_put(fid);
/* Error path: drop the FID reference, then scrub the HW entry. */
2694 mlxsw_sp_fid_put(fid);
2696 /* Remove an FDB entry in case we cannot process it. Otherwise the
2697 * device will keep sending the same notification over and over again.
2699 mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2700 (enum mlxsw_sp_l3proto) sfn_proto, &addr,
2704 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2705 char *sfn_pl, int rec_index)
2707 switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2708 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2709 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2712 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2713 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2716 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2717 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2720 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2721 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2724 case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
2725 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2728 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
2729 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2735 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2737 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2739 mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2740 msecs_to_jiffies(bridge->fdb_notify.interval));
/* Delayed-work handler that polls the SFN register for pending FDB
 * notification records and dispatches each one, then reschedules itself.
 * NOTE(review): local declarations, locking, the cleanup label and kfree
 * appear elided from this excerpt.
 */
2743 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2745 struct mlxsw_sp_bridge *bridge;
2746 struct mlxsw_sp *mlxsw_sp;
/* SFN payload buffer; freed after processing (elided from excerpt). */
2752 sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2756 bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2757 mlxsw_sp = bridge->mlxsw_sp;
2760 mlxsw_reg_sfn_pack(sfn_pl);
2761 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2763 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2766 num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2767 for (i = 0; i < num_rec; i++)
2768 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
/* Always re-arm the poll, even after a query failure. */
2773 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
/* Deferred-work context for a switchdev FDB event: the notifier handler
 * copies the event data here and queues 'work' so processing can happen
 * outside the atomic notifier context.
 */
2776 struct mlxsw_sp_switchdev_event_work {
2777 struct work_struct work;
/* Only one of the two infos below is valid, depending on 'event'. */
2779 struct switchdev_notifier_fdb_info fdb_info;
2780 struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
/* Device the event was reported on; reference held while work is queued. */
2782 struct net_device *dev;
2783 unsigned long event;
/* Mirror a bridge FDB add/del that points at a VxLAN device into the
 * device's tunnel FDB, and emit OFFLOADED notifications back to both the
 * VxLAN driver and the bridge. NOTE(review): error-check, break and brace
 * lines appear elided from this excerpt.
 */
2787 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
2788 struct mlxsw_sp_switchdev_event_work *
2790 struct mlxsw_sp_fid *fid, __be32 vni)
2792 struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2793 struct switchdev_notifier_fdb_info *fdb_info;
2794 struct net_device *dev = switchdev_work->dev;
2795 enum mlxsw_sp_l3proto proto;
2796 union mlxsw_sp_l3addr addr;
2799 fdb_info = &switchdev_work->fdb_info;
/* Only entries present in the VxLAN driver's own FDB (unicast, under
 * this VNI) are mirrored to the device.
 */
2800 err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
2804 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
2807 switch (switchdev_work->event) {
2808 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2809 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2810 vxlan_fdb_info.eth_addr,
2811 mlxsw_sp_fid_index(fid),
2812 proto, &addr, true, false);
2815 vxlan_fdb_info.offloaded = true;
2816 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2817 &vxlan_fdb_info.info, NULL);
/* Also mark the corresponding bridge FDB entry as offloaded. */
2818 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2819 vxlan_fdb_info.eth_addr,
2820 fdb_info->vid, dev, true);
2822 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2823 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2824 vxlan_fdb_info.eth_addr,
2825 mlxsw_sp_fid_index(fid),
2826 proto, &addr, false,
2828 vxlan_fdb_info.offloaded = false;
2829 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2830 &vxlan_fdb_info.info, NULL);
/* Handle a bridge FDB event whose target device is an NVE (VxLAN) device:
 * validate the event, resolve the bridge, FID and VNI, then delegate to
 * mlxsw_sp_switchdev_bridge_vxlan_fdb_event(). NOTE(review): early-return
 * and error-check lines appear elided from this excerpt.
 */
2836 mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
2839 struct mlxsw_sp_bridge_device *bridge_device;
2840 struct net_device *dev = switchdev_work->dev;
2841 struct net_device *br_dev;
2842 struct mlxsw_sp *mlxsw_sp;
2843 struct mlxsw_sp_fid *fid;
2847 if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
2848 switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
/* Only user-added entries are mirrored; learned ones are handled via
 * the device's own learning notifications.
 */
2851 if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
2852 !switchdev_work->fdb_info.added_by_user)
2855 if (!netif_running(dev))
2857 br_dev = netdev_master_upper_dev_get(dev)
2860 if (!netif_is_bridge_master(br_dev))
2862 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
2865 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2869 fid = bridge_device->ops->fid_lookup(bridge_device,
2870 switchdev_work->fdb_info.vid);
2874 err = mlxsw_sp_fid_vni(fid, &vni);
2878 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
2882 mlxsw_sp_fid_put(fid);
/* Work-queue handler for a deferred bridge FDB event: program or remove the
 * FDB entry on the relevant port and notify the bridge of offload status.
 * VxLAN devices are routed to the NVE path instead. NOTE(review): locking,
 * break statements, dev_put() and labels appear elided from this excerpt.
 */
2885 static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
2887 struct mlxsw_sp_switchdev_event_work *switchdev_work =
2888 container_of(work, struct mlxsw_sp_switchdev_event_work, work);
2889 struct net_device *dev = switchdev_work->dev;
2890 struct switchdev_notifier_fdb_info *fdb_info;
2891 struct mlxsw_sp_port *mlxsw_sp_port;
2895 if (netif_is_vxlan(dev)) {
2896 mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
2900 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2904 switch (switchdev_work->event) {
2905 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2906 fdb_info = &switchdev_work->fdb_info;
/* Skip entries learned by the bridge itself. */
2907 if (!fdb_info->added_by_user)
2909 err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
2912 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2914 fdb_info->vid, dev, true);
2916 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2917 fdb_info = &switchdev_work->fdb_info;
2918 mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
2920 case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
2921 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2922 /* These events are only used to potentially update an existing
/* Re-evaluate mirroring (SPAN) state after FDB changes. */
2928 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
/* The addr copy was allocated by the notifier handler. */
2932 kfree(switchdev_work->fdb_info.addr);
2933 kfree(switchdev_work);
/* Handle a VxLAN driver FDB addition: the all-zeros MAC maps to NVE flood
 * (BUM) entries; a unicast MAC is programmed only if the bridge's own FDB
 * points at the VxLAN device for that MAC/VLAN. Emits OFFLOADED
 * notifications on success. NOTE(review): error-check, return and some
 * label lines appear elided from this excerpt.
 */
2938 mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
2939 struct mlxsw_sp_switchdev_event_work *
2942 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
2943 struct mlxsw_sp_bridge_device *bridge_device;
2944 struct net_device *dev = switchdev_work->dev;
2945 u8 all_zeros_mac[ETH_ALEN] = { 0 };
2946 enum mlxsw_sp_l3proto proto;
2947 union mlxsw_sp_l3addr addr;
2948 struct net_device *br_dev;
2949 struct mlxsw_sp_fid *fid;
2953 vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
2954 br_dev = netdev_master_upper_dev_get(dev);
2956 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2960 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
2964 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
/* All-zeros MAC entry = flood (BUM traffic) destination for the VNI. */
2967 if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
2968 err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
2970 mlxsw_sp_fid_put(fid);
2973 vxlan_fdb_info->offloaded = true;
2974 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2975 &vxlan_fdb_info->info, NULL);
2976 mlxsw_sp_fid_put(fid);
2980 /* The device has a single FDB table, whereas Linux has two - one
2981 * in the bridge driver and another in the VxLAN driver. We only
2982 * program an entry to the device if the MAC points to the VxLAN
2983 * device in the bridge's FDB table
2985 vid = bridge_device->ops->fid_vid(bridge_device, fid);
2986 if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
2987 goto err_br_fdb_find;
2989 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
2990 mlxsw_sp_fid_index(fid), proto,
2991 &addr, true, false);
2993 goto err_fdb_tunnel_uc_op;
2994 vxlan_fdb_info->offloaded = true;
2995 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2996 &vxlan_fdb_info->info, NULL);
/* Mark the bridge FDB entry as offloaded as well. */
2997 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2998 vxlan_fdb_info->eth_addr, vid, dev, true);
3000 mlxsw_sp_fid_put(fid);
3004 err_fdb_tunnel_uc_op:
3006 mlxsw_sp_fid_put(fid);
/* Handle a VxLAN driver FDB deletion: remove the flood entry (all-zeros
 * MAC) or the unicast tunnel entry from the device, and clear the bridge's
 * offloaded mark. NOTE(review): early-return lines appear elided from this
 * excerpt.
 */
3010 mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
3011 struct mlxsw_sp_switchdev_event_work *
3014 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3015 struct mlxsw_sp_bridge_device *bridge_device;
3016 struct net_device *dev = switchdev_work->dev;
3017 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3018 u8 all_zeros_mac[ETH_ALEN] = { 0 };
3019 enum mlxsw_sp_l3proto proto;
3020 union mlxsw_sp_l3addr addr;
3021 struct mlxsw_sp_fid *fid;
3024 vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3026 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3030 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3034 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
/* All-zeros MAC entry = flood (BUM traffic) destination for the VNI. */
3037 if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
3038 mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
3039 mlxsw_sp_fid_put(fid);
3043 mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3044 mlxsw_sp_fid_index(fid), proto, &addr,
/* Tell the bridge the entry is no longer offloaded. */
3046 vid = bridge_device->ops->fid_vid(bridge_device, fid);
3047 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3048 vxlan_fdb_info->eth_addr, vid, dev, false);
3050 mlxsw_sp_fid_put(fid);
/* Work-queue handler for deferred VxLAN FDB events: validate the device is
 * still enslaved to an mlxsw-backed bridge, then dispatch to the add/del
 * handler. NOTE(review): locking, early-out labels and dev_put() appear
 * elided from this excerpt.
 */
3053 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
3055 struct mlxsw_sp_switchdev_event_work *switchdev_work =
3056 container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3057 struct net_device *dev = switchdev_work->dev;
3058 struct mlxsw_sp *mlxsw_sp;
3059 struct net_device *br_dev;
3063 if (!netif_running(dev))
3065 br_dev = netdev_master_upper_dev_get(dev);
3068 if (!netif_is_bridge_master(br_dev))
3070 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3074 switch (switchdev_work->event) {
3075 case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3076 mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
3078 case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3079 mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
3085 kfree(switchdev_work);
/* Validate a VxLAN FDB notification against device offload limitations
 * (default port/VNI only, no local interface, no multicast MAC or remote
 * IP) and, if acceptable, copy it into the deferred-work context. Each
 * rejected case sets extack; return statements appear elided from this
 * excerpt.
 */
3090 mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3092 struct switchdev_notifier_info *info)
3094 struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3095 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3096 struct vxlan_config *cfg = &vxlan->cfg;
3097 struct netlink_ext_ack *extack;
3099 extack = switchdev_notifier_info_to_extack(info);
3100 vxlan_fdb_info = container_of(info,
3101 struct switchdev_notifier_vxlan_fdb_info,
3104 if (vxlan_fdb_info->remote_port != cfg->dst_port) {
3105 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
3108 if (vxlan_fdb_info->remote_vni != cfg->vni ||
3109 vxlan_fdb_info->vni != cfg->vni) {
3110 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
3113 if (vxlan_fdb_info->remote_ifindex) {
3114 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
3117 if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
3118 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
3121 if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
3122 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
/* Struct copy into the work context; processed later in process ctx. */
3126 switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
3131 /* Called under rcu_read_lock() */
/* Atomic switchdev notifier: filter events relevant to mlxsw ports (or
 * tunnel devices bridged above them), copy the event payload into a work
 * item and defer the actual processing to process context. NOTE(review):
 * early-return, break and dev_hold() lines appear elided from this excerpt.
 */
3132 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
3133 unsigned long event, void *ptr)
3135 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3136 struct mlxsw_sp_switchdev_event_work *switchdev_work;
3137 struct switchdev_notifier_fdb_info *fdb_info;
3138 struct switchdev_notifier_info *info = ptr;
3139 struct net_device *br_dev;
3142 /* Tunnel devices are not our uppers, so check their master instead */
3143 br_dev = netdev_master_upper_dev_get_rcu(dev);
3146 if (!netif_is_bridge_master(br_dev))
3148 if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
/* Atomic context - GFP_ATOMIC is mandatory here. */
3151 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
3152 if (!switchdev_work)
3155 switchdev_work->dev = dev;
3156 switchdev_work->event = event;
3159 case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
3160 case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
3161 case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
3162 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
3163 fdb_info = container_of(info,
3164 struct switchdev_notifier_fdb_info,
3166 INIT_WORK(&switchdev_work->work,
3167 mlxsw_sp_switchdev_bridge_fdb_event_work);
3168 memcpy(&switchdev_work->fdb_info, ptr,
3169 sizeof(switchdev_work->fdb_info));
/* The notifier's addr pointer is not ours to keep - deep-copy it. */
3170 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
3171 if (!switchdev_work->fdb_info.addr)
3172 goto err_addr_alloc;
3173 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
3175 /* Take a reference on the device. This can be either
3176 * upper device containig mlxsw_sp_port or just a
3181 case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
3182 case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3183 INIT_WORK(&switchdev_work->work,
3184 mlxsw_sp_switchdev_vxlan_fdb_event_work);
3185 err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
3188 goto err_vxlan_work_prepare;
3192 kfree(switchdev_work);
3196 mlxsw_core_schedule_work(&switchdev_work->work);
3200 err_vxlan_work_prepare:
3202 kfree(switchdev_work);
/* Atomic switchdev notifier block; registered in mlxsw_sp_fdb_init(). */
3206 struct notifier_block mlxsw_sp_switchdev_notifier = {
3207 .notifier_call = mlxsw_sp_switchdev_event,
/* Handle a VLAN added on a VxLAN device under a VLAN-aware bridge. The
 * five cases below (already commented inline) decide whether the FID/VNI
 * mapping must be created, torn down, replaced or left alone, based on the
 * VLAN's PVID / egress-untagged flags. NOTE(review): return and error-label
 * lines appear elided from this excerpt.
 */
3211 mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3212 struct mlxsw_sp_bridge_device *bridge_device,
3213 const struct net_device *vxlan_dev, u16 vid,
3214 bool flag_untagged, bool flag_pvid,
3215 struct netlink_ext_ack *extack)
3217 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3218 __be32 vni = vxlan->cfg.vni;
3219 struct mlxsw_sp_fid *fid;
3223 /* We cannot have the same VLAN as PVID and egress untagged on multiple
3224 * VxLAN devices. Note that we get this notification before the VLAN is
3225 * actually added to the bridge's database, so it is not possible for
3226 * the lookup function to return 'vxlan_dev'
3228 if (flag_untagged && flag_pvid &&
3229 mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
3230 NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
3234 if (!netif_running(vxlan_dev))
3237 /* First case: FID is not associated with this VNI, but the new VLAN
3238 * is both PVID and egress untagged. Need to enable NVE on the FID, if
3241 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3243 if (!flag_untagged || !flag_pvid)
3245 return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
3246 vxlan_dev, vid, extack);
3249 /* Second case: FID is associated with the VNI and the VLAN associated
3250 * with the FID is the same as the notified VLAN. This means the flags
3251 * (PVID / egress untagged) were toggled and that NVE should be
3252 * disabled on the FID
3254 old_vid = mlxsw_sp_fid_8021q_vid(fid);
3255 if (vid == old_vid) {
3256 if (WARN_ON(flag_untagged && flag_pvid)) {
3257 mlxsw_sp_fid_put(fid);
3260 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3261 mlxsw_sp_fid_put(fid);
3265 /* Third case: A new VLAN was configured on the VxLAN device, but this
3266 * VLAN is not PVID, so there is nothing to do.
3269 mlxsw_sp_fid_put(fid);
3273 /* Fourth case: Thew new VLAN is PVID, which means the VLAN currently
3274 * mapped to the VNI should be unmapped
3276 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3277 mlxsw_sp_fid_put(fid);
3279 /* Fifth case: The new VLAN is also egress untagged, which means the
3280 * VLAN needs to be mapped to the VNI
3285 err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
3288 goto err_vxlan_join;
/* Error path: restore the previous VLAN-to-VNI mapping. */
3293 mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
/* Handle a VLAN deleted from a VxLAN device under a VLAN-aware bridge:
 * tear down the VNI mapping only if the deleted VLAN is the one mapped to
 * the FID. NOTE(review): early-return lines appear elided from this
 * excerpt.
 */
3299 mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3300 struct mlxsw_sp_bridge_device *bridge_device,
3301 const struct net_device *vxlan_dev, u16 vid)
3303 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3304 __be32 vni = vxlan->cfg.vni;
3305 struct mlxsw_sp_fid *fid;
3307 if (!netif_running(vxlan_dev))
3310 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3314 /* A different VLAN than the one mapped to the VNI is deleted */
3315 if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3318 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3321 mlxsw_sp_fid_put(fid);
/* Handle a port-VLAN object added on a VxLAN device: iterate the notified
 * VLAN range and apply mlxsw_sp_switchdev_vxlan_vlan_add() per VLAN on the
 * commit phase. NOTE(review): early-return and error-unwind lines appear
 * elided from this excerpt.
 */
3325 mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3326 struct switchdev_notifier_port_obj_info *
3329 struct switchdev_obj_port_vlan *vlan =
3330 SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3331 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3332 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3333 struct switchdev_trans *trans = port_obj_info->trans;
3334 struct mlxsw_sp_bridge_device *bridge_device;
3335 struct netlink_ext_ack *extack;
3336 struct mlxsw_sp *mlxsw_sp;
3337 struct net_device *br_dev;
3340 extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3341 br_dev = netdev_master_upper_dev_get(vxlan_dev);
3345 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
/* Mark handled before the trans check so the core does not warn. */
3349 port_obj_info->handled = true;
3351 if (switchdev_trans_ph_commit(trans))
3354 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
/* VLAN objects are only meaningful on a VLAN-aware bridge. */
3358 if (!bridge_device->vlan_enabled)
3361 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
3364 err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
/* Handle a port-VLAN object deleted from a VxLAN device: iterate the
 * notified VLAN range and apply mlxsw_sp_switchdev_vxlan_vlan_del() per
 * VLAN. NOTE(review): early-return lines appear elided from this excerpt.
 */
3376 mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3377 struct switchdev_notifier_port_obj_info *
3380 struct switchdev_obj_port_vlan *vlan =
3381 SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3382 struct mlxsw_sp_bridge_device *bridge_device;
3383 struct mlxsw_sp *mlxsw_sp;
3384 struct net_device *br_dev;
3387 br_dev = netdev_master_upper_dev_get(vxlan_dev);
3391 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3395 port_obj_info->handled = true;
3397 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
/* VLAN objects are only meaningful on a VLAN-aware bridge. */
3401 if (!bridge_device->vlan_enabled)
3404 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
3405 mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
/* Dispatch a switchdev object addition on a VxLAN device by object type;
 * only port-VLAN objects are handled. NOTE(review): break/return lines
 * appear elided from this excerpt.
 */
3410 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3411 struct switchdev_notifier_port_obj_info *
3416 switch (port_obj_info->obj->id) {
3417 case SWITCHDEV_OBJ_ID_PORT_VLAN:
3418 err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
/* Dispatch a switchdev object deletion on a VxLAN device by object type;
 * only port-VLAN objects are handled. NOTE(review): break lines appear
 * elided from this excerpt.
 */
3429 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3430 struct switchdev_notifier_port_obj_info *
3433 switch (port_obj_info->obj->id) {
3434 case SWITCHDEV_OBJ_ID_PORT_VLAN:
3435 mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
/* Blocking switchdev notifier: route object add/del either to the VxLAN
 * handlers (when the target is a VxLAN device) or to the generic per-port
 * object handlers via the switchdev helpers. NOTE(review): some
 * break/return lines appear elided from this excerpt.
 */
3442 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3443 unsigned long event, void *ptr)
3445 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3449 case SWITCHDEV_PORT_OBJ_ADD:
3450 if (netif_is_vxlan(dev))
3451 err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
3453 err = switchdev_handle_port_obj_add(dev, ptr,
3454 mlxsw_sp_port_dev_check,
3455 mlxsw_sp_port_obj_add);
3456 return notifier_from_errno(err);
3457 case SWITCHDEV_PORT_OBJ_DEL:
3458 if (netif_is_vxlan(dev))
3459 mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
3461 err = switchdev_handle_port_obj_del(dev, ptr,
3462 mlxsw_sp_port_dev_check,
3463 mlxsw_sp_port_obj_del);
3464 return notifier_from_errno(err);
/* Blocking switchdev notifier block; registered in mlxsw_sp_fdb_init(). */
3470 static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
3471 .notifier_call = mlxsw_sp_switchdev_blocking_event,
/* Accessor for the cached STP state of a bridge port. NOTE(review): the
 * return type is on a line elided from this excerpt.
 */
3475 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
3477 return bridge_port->stp_state;
/* Initialize FDB processing: set the default ageing time, register the
 * atomic and blocking switchdev notifiers, and start the FDB notification
 * polling work. Unwinds registrations on failure (goto-cleanup pattern;
 * some return lines appear elided from this excerpt).
 */
3480 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
3482 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
3483 struct notifier_block *nb;
3486 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
3488 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
3492 err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3494 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
3498 nb = &mlxsw_sp_switchdev_blocking_notifier;
3499 err = register_switchdev_blocking_notifier(nb);
3501 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
3502 goto err_register_switchdev_blocking_notifier;
/* Arm the FDB notification poll at the default learning interval. */
3505 INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
3506 bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
3507 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
3510 err_register_switchdev_blocking_notifier:
3511 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3515 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3517 struct notifier_block *nb;
3519 cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3521 nb = &mlxsw_sp_switchdev_blocking_notifier;
3522 unregister_switchdev_blocking_notifier(nb);
3524 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
/* Allocate and initialize the per-ASIC bridge state and kick off FDB
 * processing. Returns 0 on success or a negative errno. NOTE(review): the
 * NULL check after kzalloc() appears elided from this excerpt.
 */
3527 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3529 struct mlxsw_sp_bridge *bridge;
3531 bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3534 mlxsw_sp->bridge = bridge;
3535 bridge->mlxsw_sp = mlxsw_sp;
3537 INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3539 bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3540 bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3542 return mlxsw_sp_fdb_init(mlxsw_sp);
/* Reverse of mlxsw_sp_switchdev_init(): stop FDB processing and free the
 * bridge state. All bridge devices must have been released by now.
 */
3545 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
3547 mlxsw_sp_fdb_fini(mlxsw_sp);
3548 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
3549 kfree(mlxsw_sp->bridge);
/* Attach the mlxsw switchdev ops to a port's netdev. */
3552 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
3554 mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
3557 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)