/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

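/* A note on the FID model used throughout this file: in a VLAN-aware bridge
 * each VLAN maps 1:1 to a FID (filtering identifier), while VLAN upper
 * devices on top of ports ("vPorts") use vFIDs taken from a separate range.
 * The translation helpers used below (e.g. mlxsw_sp_fid_is_vfid(),
 * mlxsw_sp_fid_to_vfid()) are declared elsewhere in the driver.
 */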
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	u16 fid = vid;

	fid = f ? f->fid : fid;

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

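/* switchdev hands us the device the operation was originally targeted at,
 * which may be a bridge or VLAN device stacked on top of the port. Resolve
 * it to the matching vPort so the rest of the code operates on the correct
 * {port, VID} context.
 */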
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	if (netif_is_bridge_master(dev)) {
		fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp,
					 dev);
		if (fid) {
			mlxsw_sp_vport =
				mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								fid->fid);
			WARN_ON(!mlxsw_sp_vport);
			return mlxsw_sp_vport;
		}
	}

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

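/* switchdev attribute setters are invoked twice: once in the prepare phase,
 * where a driver may only validate and allocate, and once in the commit
 * phase, where the change is actually applied and must not fail. Hence the
 * early return on switchdev_trans_ph_prepare() in most setters below.
 */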
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

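/* Flooding is controlled per {flood table, index} via the SFTR register.
 * For vPorts the table is indexed by FID directly; for VLAN-aware ports it
 * is indexed by FID offset, hence the two table types below.
 */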
static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 idx_begin, u16 idx_end,
					   enum mlxsw_sp_flood_table table,
					   bool set)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

	kfree(sftr_pl);
	return err;
}

static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool uc_set,
				     bool bc_set, bool mc_set)
{
	int err;

	err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					      MLXSW_SP_FLOOD_TABLE_UC, uc_set);
	if (err)
		return err;

	err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					      MLXSW_SP_FLOOD_TABLE_BC, bc_set);
	if (err)
		goto err_flood_bm_set;

	err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					      MLXSW_SP_FLOOD_TABLE_MC, mc_set);
	if (err)
		goto err_flood_mc_set;

	return 0;

err_flood_mc_set:
	__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					MLXSW_SP_FLOOD_TABLE_BC, !bc_set);
err_flood_bm_set:
	__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					MLXSW_SP_FLOOD_TABLE_UC, !uc_set);
	return err;
}

static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 enum mlxsw_sp_flood_table table,
					 bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);

		return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid,
						       vfid, table, set);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid,
						      table, set);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table,
						!set);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 bool mc_disabled)
{
	int set;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) {
		set = mc_disabled ?
			mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
		err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
						    MLXSW_SP_FLOOD_TABLE_MC,
						    set);
	}

	if (!err)
		mlxsw_sp_port->mc_disabled = mc_disabled;

	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set)
{
	bool mc_set = set;
	u16 vfid;

	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	vfid = mlxsw_sp_fid_to_vfid(fid);

	if (set)
		mc_set = mlxsw_sp_vport->mc_disabled ?
			 mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router;

	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set,
					 mc_set);
}

static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	u16 vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
							set);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						       set);
		if (err)
			goto err_port_vid_learning_set;
	}

	return 0;

err_port_vid_learning_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
	return err;
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
						    MLXSW_SP_FLOOD_TABLE_UC,
						    !mlxsw_sp_port->uc_flood);
		if (err)
			return err;
	}

	if ((learning ^ brport_flags) & BR_LEARNING) {
		err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
						 !mlxsw_sp_port->learning);
		if (err)
			goto err_port_learning_set;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;

err_port_learning_set:
	if ((uc_flood ^ brport_flags) & BR_FLOOD)
		mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
					      MLXSW_SP_FLOOD_TABLE_UC,
					      mlxsw_sp_port->uc_flood);
	return err;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    bool is_port_mc_router)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->mc_router = is_port_mc_router;
	if (!mlxsw_sp_port->mc_disabled)
		return mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
						     MLXSW_SP_FLOOD_TABLE_MC,
						     is_port_mc_router);

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
						       attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->u.mc_disabled);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

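/* FIDs are created and destroyed in hardware through the SFMR register;
 * the SVFA register then maps VIDs (globally or per-port) to these FIDs.
 */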
static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	enum mlxsw_reg_sfmr_op op = create ? MLXSW_REG_SFMR_OP_CREATE_FID :
			MLXSW_REG_SFMR_OP_DESTROY_FID;
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, op, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->fid = fid;

	return f;
}

struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	struct mlxsw_sp_fid *f;
	int err;

	err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
	if (err)
		return ERR_PTR(err);

	/* Although all the ports that are members of the FID might be
	 * using a {Port, VID} to FID mapping, we create a global VID-to-FID
	 * mapping. This allows a port to transition to VLAN mode,
	 * knowing the global mapping exists.
	 */
	err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
	if (err)
		goto err_fid_map;

	f = mlxsw_sp_fid_alloc(fid);
	if (!f) {
		err = -ENOMEM;
		goto err_allocate_fid;
	}

	list_add(&f->list, &mlxsw_sp->fids);

	return f;

err_allocate_fid:
	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
	return ERR_PTR(err);
}

void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
{
	u16 fid = f->fid;

	list_del(&f->list);

	if (f->rif)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);

	kfree(f);

	mlxsw_sp_fid_map(mlxsw_sp, fid, false);

	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}

static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp_fid *f;

	if (test_bit(fid, mlxsw_sp_port->active_vlans))
		return 0;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);

	return 0;
}

static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (WARN_ON(!f))
		return;

	netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);

	if (--f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
				 bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

	/* If port doesn't have vPorts, then it can use the global
	 * VID-to-FID mapping.
	 */
	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
}

static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 fid_begin, u16 fid_end)
{
	bool mc_flood;
	int fid, err;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
		if (err)
			goto err_port_fid_join;
	}

	mc_flood = mlxsw_sp_port->mc_disabled ?
			mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
					mlxsw_sp_port->uc_flood, true,
					mc_flood);
	if (err)
		goto err_port_flood_set;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
		if (err)
			goto err_port_fid_map;
	}

	return 0;

err_port_fid_map:
	for (fid--; fid >= fid_begin; fid--)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false, false);
err_port_flood_set:
	fid = fid_end;
err_port_fid_join:
	for (fid--; fid >= fid_begin; fid--)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
	return err;
}

static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid_begin, u16 fid_end)
{
	int fid;

	for (fid = fid_begin; fid <= fid_end; fid++)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false, false);

	for (fid = fid_begin; fid <= fid_end; fid++)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid_begin, u16 vid_end,
					  bool learn_enable)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
						       vid_e, learn_enable);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, old_pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
	if (err) {
		netdev_err(dev, "Failed to join FIDs\n");
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
				     true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
					     mlxsw_sp_port->learning);
	if (err) {
		netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
			   vid_begin, vid_end);
		goto err_port_vid_learning_set;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);
err_port_vid_learning_set:
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
			       false, false);
err_port_vlans_set:
	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

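/* FDB records are written through the SFD register. Dynamically learned
 * entries are subject to ageing, while static entries (e.g. those added by
 * user space via bridge fdb) are not, hence the record policy selection
 * below.
 */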
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}

int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

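/* Multicast groups are represented in hardware by MID indices; a multicast
 * FDB record points at a MID, and the SMID register edits the set of local
 * ports that are members of that MID.
 */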
static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 fid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 fid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end)
{
	u16 vid, pvid;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
			       false, false);

	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
					 vlan->vid_end);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

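/* Dump the hardware FDB by repeatedly querying the SFD register until a
 * partial page is returned. The firmware dump session must be run to
 * completion even if a callback fails, so callback errors are stored and
 * only reported once the loop terminates.
 */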
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	struct mlxsw_sp_fid *f;
	u16 vport_fid;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	vport_fid = f ? f->fid : 0;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

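/* Learned and aged-out MACs are reflected back to the bridge driver via
 * switchdev notifiers, but only when BR_LEARNING_SYNC was requested for
 * the port.
 */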
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

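/* The device does not interrupt on learning events; instead, the SFN
 * register is polled from delayed work every fdb_notify.interval
 * milliseconds, and the work item re-schedules itself when done.
 */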
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}