/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai);

enum {
        MLX5E_ACTION_NONE = 0,
        MLX5E_ACTION_ADD  = 1,
        MLX5E_ACTION_DEL  = 2,
};

struct mlx5e_l2_hash_node {
        struct hlist_node    hlist;
        u8                   action;
        struct mlx5e_l2_rule ai;
};

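/* The netdev UC/MC address lists are mirrored into small hash tables
 * (MLX5E_L2_ADDR_HASH_SIZE buckets, keyed on the last byte of the MAC).
 * Each node carries a pending action that is later executed against the
 * hardware L2 flow table.
 */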
static inline int mlx5e_hash_l2(u8 *addr)
{
        return addr[5];
}

static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
        struct mlx5e_l2_hash_node *hn;
        int ix = mlx5e_hash_l2(addr);
        int found = 0;

        hlist_for_each_entry(hn, &hash[ix], hlist)
                if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
                        found = 1;
                        break;
                }

        if (found) {
                hn->action = MLX5E_ACTION_NONE;
                return;
        }

        hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
        if (!hn)
                return;

        ether_addr_copy(hn->ai.addr, addr);
        hn->action = MLX5E_ACTION_ADD;

        hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
        hlist_del(&hn->hlist);
        kfree(hn);
}

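/* Push the set of active VLANs to the device's NIC vport context so the
 * firmware/eswitch can filter on them; the list is truncated to the
 * device limit (log_max_vlan_list) if needed.
 */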
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
        struct net_device *ndev = priv->netdev;
        int list_size, max_list_size;
        u16 *vlans;
        int vlan, err, i;

        list_size = 0;
        for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
                list_size++;

        max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

        if (list_size > max_list_size) {
                netdev_warn(ndev,
                            "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
                            list_size, max_list_size);
                list_size = max_list_size;
        }

        vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
        if (!vlans)
                return -ENOMEM;

        i = 0;
        for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
                if (i >= list_size)
                        break;
                vlans[i++] = vlan;
        }

        err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
        if (err)
                netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
                           err);

        kfree(vlans);
        return err;
}

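/* A VLAN steering rule either accepts untagged frames, any tagged frame
 * (used while VLAN filtering is disabled), or one specific VLAN ID.
 */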
enum mlx5e_vlan_rule_type {
        MLX5E_VLAN_RULE_TYPE_UNTAGGED,
        MLX5E_VLAN_RULE_TYPE_ANY_VID,
        MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                                 enum mlx5e_vlan_rule_type rule_type,
                                 u16 vid, struct mlx5_flow_spec *spec)
{
        struct mlx5_flow_act flow_act = {
                .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
                .encap_id = 0,
        };
        struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
        struct mlx5_flow_destination dest;
        struct mlx5_flow_handle **rule_p;
        int err = 0;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fs.l2.ft.t;

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);

        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                rule_p = &priv->fs.vlan.untagged_rule;
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_VID:
                rule_p = &priv->fs.vlan.any_vlan_rule;
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
                break;
        default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
                rule_p = &priv->fs.vlan.active_vlans_rule[vid];
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.first_vid);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
                         vid);
                break;
        }

        *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

        if (IS_ERR(*rule_p)) {
                err = PTR_ERR(*rule_p);
                *rule_p = NULL;
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }

        return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                               enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        struct mlx5_flow_spec *spec;
        int err = 0;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
                return -ENOMEM;
        }

        if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
                mlx5e_vport_context_update_vlans(priv);

        err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

        kvfree(spec);

        return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
                                enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                if (priv->fs.vlan.untagged_rule) {
                        mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
                        priv->fs.vlan.untagged_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_VID:
                if (priv->fs.vlan.any_vlan_rule) {
                        mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
                        priv->fs.vlan.any_vlan_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
                mlx5e_vport_context_update_vlans(priv);
                if (priv->fs.vlan.active_vlans_rule[vid]) {
                        mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
                        priv->fs.vlan.active_vlans_rule[vid] = NULL;
                }
                mlx5e_vport_context_update_vlans(priv);
                break;
        }
}

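/* Toggling RX VLAN filtering only adds/removes the "any VID" catch-all
 * rule; when the netdev is promiscuous that rule is owned by the promisc
 * path in mlx5e_set_rx_mode_work() instead.
 */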
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
        if (!priv->fs.vlan.filter_disabled)
                return;

        priv->fs.vlan.filter_disabled = false;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
        if (priv->fs.vlan.filter_disabled)
                return;

        priv->fs.vlan.filter_disabled = true;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        set_bit(vid, priv->fs.vlan.active_vlans);

        return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
                           u16 vid)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        clear_bit(vid, priv->fs.vlan.active_vlans);

        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

        return 0;
}

static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
        int i;

        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

        for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
        }

        if (priv->fs.vlan.filter_disabled &&
            !(priv->netdev->flags & IFF_PROMISC))
                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
        int i;

        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

        for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
        }

        if (priv->fs.vlan.filter_disabled &&
            !(priv->netdev->flags & IFF_PROMISC))
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

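/* Walk every bucket of an L2 address hash table and apply the pending
 * ADD/DEL action recorded on each node.
 */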
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
        for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
                hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
                                    struct mlx5e_l2_hash_node *hn)
{
        switch (hn->action) {
        case MLX5E_ACTION_ADD:
                mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
                hn->action = MLX5E_ACTION_NONE;
                break;

        case MLX5E_ACTION_DEL:
                mlx5e_del_l2_flow_rule(priv, &hn->ai);
                mlx5e_del_l2_from_hash(hn);
                break;
        }
}

static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct netdev_hw_addr *ha;

        netif_addr_lock_bh(netdev);

        mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
                             priv->netdev->dev_addr);

        netdev_for_each_uc_addr(ha, netdev)
                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

        netdev_for_each_mc_addr(ha, netdev)
                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

        netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
                                  u8 addr_array[][ETH_ALEN], int size)
{
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
        struct net_device *ndev = priv->netdev;
        struct mlx5e_l2_hash_node *hn;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
        int i = 0;
        int hi;

        addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

        if (is_uc) /* Make sure our own address is pushed first */
                ether_addr_copy(addr_array[i++], ndev->dev_addr);
        else if (priv->fs.l2.broadcast_enabled)
                ether_addr_copy(addr_array[i++], ndev->broadcast);

        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
                if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
                        continue;
                if (i >= size)
                        break;
                ether_addr_copy(addr_array[i++], hn->ai.addr);
        }
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
                                                 int list_type)
{
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
        struct mlx5e_l2_hash_node *hn;
        u8 (*addr_array)[ETH_ALEN] = NULL;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
        int max_size;
        int size;
        int err;
        int hi;

        size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
        max_size = is_uc ?
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

        addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
                size++;

        if (size > max_size) {
                netdev_warn(priv->netdev,
                            "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
                            is_uc ? "UC" : "MC", size, max_size);
                size = max_size;
        }

        if (size) {
                addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
                if (!addr_array) {
                        err = -ENOMEM;
                        goto out;
                }
                mlx5e_fill_addr_array(priv, list_type, addr_array, size);
        }

        err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
        if (err)
                netdev_err(priv->netdev,
                           "Failed to modify vport %s list err(%d)\n",
                           is_uc ? "UC" : "MC", err);
        kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_table *ea = &priv->fs.l2;

        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
        mlx5_modify_nic_vport_promisc(priv->mdev, 0,
                                      ea->allmulti_enabled,
                                      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
                mlx5e_execute_l2_action(priv, hn);

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
                mlx5e_execute_l2_action(priv, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
                hn->action = MLX5E_ACTION_DEL;
        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
                hn->action = MLX5E_ACTION_DEL;

        if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
                mlx5e_sync_netdev_addr(priv);

        mlx5e_apply_netdev_addr(priv);
}

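/* Workqueue handler that reconciles the netdev RX mode (promisc, allmulti,
 * UC/MC address lists) with the L2 flow table and the NIC vport context.
 */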
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               set_rx_mode_work);

        struct mlx5e_l2_table *ea = &priv->fs.l2;
        struct net_device *ndev = priv->netdev;

        bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
        bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
        bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
        bool broadcast_enabled = rx_mode_enable;

        bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
        bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
        bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
        bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
        bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
        bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

        if (enable_promisc) {
                mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
                if (!priv->fs.vlan.filter_disabled)
                        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
                                            0);
        }
        if (enable_allmulti)
                mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
        if (enable_broadcast)
                mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

        mlx5e_handle_netdev_addr(priv);

        if (disable_broadcast)
                mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
        if (disable_allmulti)
                mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
        if (disable_promisc) {
                if (!priv->fs.vlan.filter_disabled)
                        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
                                            0);
                mlx5e_del_l2_flow_rule(priv, &ea->promisc);
        }

        ea->promisc_enabled   = promisc_enabled;
        ea->allmulti_enabled  = allmulti_enabled;
        ea->broadcast_enabled = broadcast_enabled;

        mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
        int i;

        for (i = ft->num_groups - 1; i >= 0; i--) {
                if (!IS_ERR_OR_NULL(ft->g[i]))
                        mlx5_destroy_flow_group(ft->g[i]);
                ft->g[i] = NULL;
        }
        ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
        ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
        mlx5e_destroy_groups(ft);
        kfree(ft->g);
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                if (!IS_ERR_OR_NULL(ttc->rules[i])) {
                        mlx5_del_flow_rules(ttc->rules[i]);
                        ttc->rules[i] = NULL;
                }
        }
}

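/* Traffic type (TT) classification: for each TT, the ethertype and IP
 * protocol its TTC rule matches on. Entries with proto 0 use a pure L3
 * (ethertype) match; MLX5E_TT_ANY matches everything that remains.
 */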
static struct {
        u16 etype;
        u8 proto;
} ttc_rules[] = {
        [MLX5E_TT_IPV4_TCP]       = { .etype = ETH_P_IP,   .proto = IPPROTO_TCP },
        [MLX5E_TT_IPV6_TCP]       = { .etype = ETH_P_IPV6, .proto = IPPROTO_TCP },
        [MLX5E_TT_IPV4_UDP]       = { .etype = ETH_P_IP,   .proto = IPPROTO_UDP },
        [MLX5E_TT_IPV6_UDP]       = { .etype = ETH_P_IPV6, .proto = IPPROTO_UDP },
        [MLX5E_TT_IPV4_IPSEC_AH]  = { .etype = ETH_P_IP,   .proto = IPPROTO_AH },
        [MLX5E_TT_IPV6_IPSEC_AH]  = { .etype = ETH_P_IPV6, .proto = IPPROTO_AH },
        [MLX5E_TT_IPV4_IPSEC_ESP] = { .etype = ETH_P_IP,   .proto = IPPROTO_ESP },
        [MLX5E_TT_IPV6_IPSEC_ESP] = { .etype = ETH_P_IPV6, .proto = IPPROTO_ESP },
        [MLX5E_TT_IPV4]           = { .etype = ETH_P_IP,   .proto = 0 },
        [MLX5E_TT_IPV6]           = { .etype = ETH_P_IPV6, .proto = 0 },
        [MLX5E_TT_ANY]            = { .etype = 0,          .proto = 0 },
};

static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
                        struct mlx5_flow_table *ft,
                        struct mlx5_flow_destination *dest,
                        u16 etype,
                        u8 proto)
{
        struct mlx5_flow_act flow_act = {
                .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
                .encap_id = 0,
        };
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err = 0;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
                return ERR_PTR(-ENOMEM);
        }

        if (proto) {
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
        }
        if (etype) {
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
        }

        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }

        kvfree(spec);
        return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
        struct mlx5_flow_destination dest;
        struct mlx5e_ttc_table *ttc;
        struct mlx5_flow_handle **rules;
        struct mlx5_flow_table *ft;
        int tt;
        int err;

        ttc = &priv->fs.ttc;
        ft = ttc->ft.t;
        rules = ttc->rules;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
                if (tt == MLX5E_TT_ANY)
                        dest.tir_num = priv->direct_tir[0].tirn;
                else
                        dest.tir_num = priv->indir_tir[tt].tirn;
                rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
                                                    ttc_rules[tt].etype,
                                                    ttc_rules[tt].proto);
                if (IS_ERR(rules[tt]))
                        goto del_rules;
        }

        return 0;

del_rules:
        err = PTR_ERR(rules[tt]);
        rules[tt] = NULL;
        mlx5e_cleanup_ttc_rules(ttc);
        return err;
}

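/* The TTC table uses three flow groups: one matching ethertype + IP
 * protocol (the L4 traffic types), one matching ethertype only (IPv4/IPv6),
 * and one with no match criteria for MLX5E_TT_ANY.
 */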
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_TTC_NUM_GROUPS   3
#define MLX5E_TTC_GROUP1_SIZE  BIT(3)
#define MLX5E_TTC_GROUP2_SIZE  BIT(1)
#define MLX5E_TTC_GROUP3_SIZE  BIT(0)
#define MLX5E_TTC_TABLE_SIZE   (MLX5E_TTC_GROUP1_SIZE +\
                                MLX5E_TTC_GROUP2_SIZE +\
                                MLX5E_TTC_GROUP3_SIZE)
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &ttc->ft;
        int ix = 0;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
                        sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        /* L4 Group */
        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* L3 Group */
        MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* Any Group */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        kvfree(in);
        return 0;

err:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        kvfree(in);

        return err;
}

static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

        mlx5e_cleanup_ttc_rules(ttc);
        mlx5e_destroy_flow_table(&ttc->ft);
}

static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
        struct mlx5e_flow_table *ft = &ttc->ft;
        int err;

        ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
                                       MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL, 0);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_ttc_table_groups(ttc);
        if (err)
                goto err;

        err = mlx5e_generate_ttc_table_rules(priv);
        if (err)
                goto err;

        return 0;
err:
        mlx5e_destroy_flow_table(ft);
        return err;
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai)
{
        if (!IS_ERR_OR_NULL(ai->rule)) {
                mlx5_del_flow_rules(ai->rule);
                ai->rule = NULL;
        }
}

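/* Install an L2 (DMAC) steering rule: an exact DMAC match (FULLMATCH),
 * a match on the multicast bit only (ALLMULTI), or no match at all
 * (PROMISC). Matching packets are forwarded to the TTC table.
 */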
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                  struct mlx5e_l2_rule *ai, int type)
{
        struct mlx5_flow_act flow_act = {
                .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
                .encap_id = 0,
        };
        struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
        struct mlx5_flow_destination dest;
        struct mlx5_flow_spec *spec;
        int err = 0;
        u8 *mc_dmac;
        u8 *mv_dmac;

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec) {
                netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
                return -ENOMEM;
        }

        mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                               outer_headers.dmac_47_16);
        mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                               outer_headers.dmac_47_16);

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fs.ttc.ft.t;

        switch (type) {
        case MLX5E_FULLMATCH:
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                eth_broadcast_addr(mc_dmac);
                ether_addr_copy(mv_dmac, ai->addr);
                break;

        case MLX5E_ALLMULTI:
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                mc_dmac[0] = 0x01;
                mv_dmac[0] = 0x01;
                break;

        default: /* MLX5E_PROMISC: no match criteria, match all traffic */
                break;
        }

        ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
        if (IS_ERR(ai->rule)) {
                netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
                           __func__, mv_dmac);
                err = PTR_ERR(ai->rule);
                ai->rule = NULL;
        }

        kvfree(spec);

        return err;
}

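/* L2 table flow groups: one entry for promiscuous mode, 2^15 entries for
 * exact DMAC matches, and one entry for the allmulti rule.
 */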
#define MLX5E_NUM_L2_GROUPS    3
#define MLX5E_L2_GROUP1_SIZE   BIT(0)
#define MLX5E_L2_GROUP2_SIZE   BIT(15)
#define MLX5E_L2_GROUP3_SIZE   BIT(0)
#define MLX5E_L2_TABLE_SIZE    (MLX5E_L2_GROUP1_SIZE +\
                                MLX5E_L2_GROUP2_SIZE +\
                                MLX5E_L2_GROUP3_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &l2_table->ft;
        int ix = 0;
        u8 *mc_dmac;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
                               outer_headers.dmac_47_16);
        /* Flow Group for promiscuous */
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Flow Group for full match */
        eth_broadcast_addr(mc_dmac);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Flow Group for allmulti */
        eth_zero_addr(mc_dmac);
        mc_dmac[0] = 0x01;
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        kvfree(in);
        return 0;

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);
        kvfree(in);

        return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
        mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_table *l2_table = &priv->fs.l2;
        struct mlx5e_flow_table *ft = &l2_table->ft;
        int err;

        ft->num_groups = 0;
        ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
                                       MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL, 0);

        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_l2_table_groups(l2_table);
        if (err)
                goto err_destroy_flow_table;

        return 0;

err_destroy_flow_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return err;
}

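/* VLAN table flow groups: 4096 entries matching vlan_tag + first_vid
 * (per-VID rules) and two entries matching vlan_tag only (untagged and
 * any-VID rules).
 */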
#define MLX5E_NUM_VLAN_GROUPS  2
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
#define MLX5E_VLAN_TABLE_SIZE  (MLX5E_VLAN_GROUP0_SIZE +\
                                MLX5E_VLAN_GROUP1_SIZE)

static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
                                            int inlen)
{
        int err;
        int ix = 0;
        u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP0_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        return 0;

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);

        return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
        u32 *in;
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

        kvfree(in);
        return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
        struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
        int err;

        ft->num_groups = 0;
        ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
                                       MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL, 0);

        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }
        ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g) {
                err = -ENOMEM;
                goto err_destroy_vlan_table;
        }

        err = mlx5e_create_vlan_table_groups(ft);
        if (err)
                goto err_free_g;

        mlx5e_add_vlan_rules(priv);

        return 0;

err_free_g:
        kfree(ft->g);
err_destroy_vlan_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
        mlx5e_del_vlan_rules(priv);
        mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

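/* Build the RX steering tables. They are created destination-first (TTC
 * before L2, L2 before VLAN) so each table can forward to the next stage;
 * RX packets are looked up in the VLAN table, then the L2 table, then the
 * TTC table, which spreads traffic to the TIRs. Teardown is the reverse.
 */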
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
        int err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        if (!priv->fs.ns)
                return -EOPNOTSUPP;

        err = mlx5e_arfs_create_tables(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
                           err);
                priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
        }

        err = mlx5e_create_ttc_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
                goto err_destroy_arfs_tables;
        }

        err = mlx5e_create_l2_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
                           err);
                goto err_destroy_ttc_table;
        }

        err = mlx5e_create_vlan_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
                           err);
                goto err_destroy_l2_table;
        }

        mlx5e_ethtool_init_steering(priv);

        return 0;

err_destroy_l2_table:
        mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
        mlx5e_destroy_ttc_table(priv);
err_destroy_arfs_tables:
        mlx5e_arfs_destroy_tables(priv);

        return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
        mlx5e_destroy_vlan_table(priv);
        mlx5e_destroy_l2_table(priv);
        mlx5e_destroy_ttc_table(priv);
        mlx5e_arfs_destroy_tables(priv);
        mlx5e_ethtool_cleanup_steering(priv);
}