/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return !!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED);
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}

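/* A caller-side sketch (illustrative only; not part of this file's flow):
 * offload code is expected to validate a requested chain/prio against the
 * ranges above before building rules, e.g.:
 *
 *	if (chain > mlx5_eswitch_get_chain_range(esw) ||
 *	    prio > mlx5_eswitch_get_prio_range(esw))
 *		return -EOPNOTSUPP;
 */
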
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.reformat_id = attr->dests[j].encap_id;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.reformat_id =
						attr->dests[j].encap_id;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
		if (attr->match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
	} else if (attr->match_level != MLX5_MATCH_NONE) {
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;

	esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

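/* Minimal usage sketch (hypothetical caller, for orientation only): the
 * caller owns both @spec and @attr and must keep @attr around for the
 * matching delete:
 *
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 */
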
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.reformat_id = attr->dests[i].encap_id;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	esw->offloads.num_flows++;

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);

	mlx5_del_flow_rules(rule);
	esw->offloads.num_flows--;

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->rep_if[REP_ETH].valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep; /* nop: callers only track VF --> wire rules on it */

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		   !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

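/* Note on pairing (a summary of the two helpers above): on devices where
 * per-flow vlan push/pop is emulated via vport vlan configuration, every
 * successful mlx5_eswitch_add_vlan_action() must eventually be balanced
 * by mlx5_eswitch_del_vlan_action() with the same attr, so the per-vport
 * vlan and global pop-policy refcounts unwind correctly.
 */
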
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

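/* Usage sketch (illustrative): a representor installs one such rule per
 * send queue so traffic it transmits reaches the represented vport, e.g.
 * (the sqn source shown is hypothetical):
 *
 *	flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport,
 *							sq->sqn);
 *	if (IS_ERR(flow_rule))
 *		return PTR_ERR(flow_rule);
 */
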
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				  misc_parameters);

	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
		 MLX5_CAP_GEN(peer_dev, vhca_id));

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = 0;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	for (i = 1; i < nvports; i++) {
		MLX5_SET(fte_match_set_misc, misc, source_port, i);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
			goto add_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_flow_err:
	for (i--; i > 0; i--)
		mlx5_del_flow_rules(flows[i]);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	for (i = 1; i < esw->total_vports; i++)
		mlx5_del_flow_rules(flows[i]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
 * and a virtual memory region of 16M (ESW_SIZE); this region is duplicated
 * for each flow table pool. We can allocate up to 16M for each pool,
 * and we keep track of how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };

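/* Worked example of the pool accounting: with ESW_SIZE and ESW_POOLS as
 * above and a firmware table-size cap of at least 4M, fdb_left starts as
 * { 16M/4M, 16M/1M, 16M/64K, 16M/4K } = { 4, 16, 256, 4096 }; each table
 * created below borrows the largest size that still has entries left.
 */
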
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}

static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

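/* Refcounting sketch (for orientation): every successful
 * esw_get_prio_table() must be balanced by an esw_put_prio_table() on the
 * same (chain, prio, level) triplet; the last put destroys the table and
 * returns its size to the pool:
 *
 *	fdb = esw_get_prio_table(esw, chain, prio, 0);
 *	...
 *	esw_put_prio_table(esw, chain, prio, 0);
 */
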
static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

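/* Resulting slow path FDB group layout (boundaries as computed above,
 * shown only as a reading aid):
 *
 *	[0 .. nvports * MAX_SQ_NVPORTS + MAX_PF_SQ - 1]	send-to-vport group
 *	[.. + esw->total_vports entries]		peer e-switch miss group
 *	[.. + MLX5_ESW_MISS_FLOWS entries]		unicast/multicast miss group
 */
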
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = MLX5_VPORT_UPLINK;

	return 0;
}

static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
					  u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		rep->rep_if[rep_type].unload(rep);
	}
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
				       u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps_type(esw, vport, rep_type);
	return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
	return err;
}

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_eswitch *peer_esw = event_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_out;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);

err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw, nvports);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw, nvports);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	esw_offloads_devcom_init(esw);
	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_devcom_cleanup(esw);
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

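/* These two callbacks back devlink's eswitch mode API; from userspace the
 * transition is driven with iproute2's devlink tool, e.g. (the PCI address
 * is illustrative):
 *
 *	devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *	devlink dev eswitch show pci/0000:03:00.0
 */
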
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load   = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->rep_if[rep_type].unload(rep);

	rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

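/* Registration sketch (illustrative; the callback names are hypothetical
 * and would be supplied by the registering driver, e.g. mlx5e):
 *
 *	struct mlx5_eswitch_rep_if rep_if = {};
 *
 *	rep_if.load = my_rep_load;
 *	rep_if.unload = my_rep_unload;
 *	rep_if.get_proto_dev = my_rep_get_proto_dev;
 *	mlx5_eswitch_register_vport_rep(esw, vport_index, &rep_if, REP_ETH);
 */
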
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	if (vport == MLX5_VPORT_UPLINK)
		vport = UPLINK_REP_INDEX;

	rep = &offloads->vport_reps[vport];

	if (rep->rep_if[rep_type].valid &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);