/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

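/* Add a TC flow to the NIC RX flow table. The auto-grouped TC table is
 * created on first use; a flow counter is attached when the action is
 * count-only (e.g. drop with counting). On error, a table created here
 * is destroyed again so a failed first rule leaves no state behind.
 */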
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

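/* Add a TC flow to the e-switch FDB (SRIOV offloads mode). The vlan
 * action is applied to the eswitch first; only if that succeeds is the
 * offloaded rule itself installed.
 */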
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

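/* Drop this flow's reference on its encap entry. The last flow to
 * detach frees the entry, deallocating the HW encap_id and releasing
 * the neighbour if one was captured.
 */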
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

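/* Tear down an offloaded flow: delete the HW rule, undo eswitch vlan
 * and encap state in offloads mode, free the flow counter, and destroy
 * the TC table once the last filter is gone.
 */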
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);

	mlx5_del_flow_rules(flow->rule);

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		mlx5_eswitch_del_vlan_action(esw, flow->attr);
		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
			mlx5e_detach_encap(priv, flow);
	}

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

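/* Fill the VXLAN portion of the match spec: the outer ip_protocol is
 * pinned to UDP, and the VNI match is taken from the flower enc_key_id
 * key when one is given.
 */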
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

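/* Parse the tunnel (outer header) match keys into the flow spec. Only
 * VXLAN over IPv4 with a fully masked UDP dst port on a known VXLAN
 * port is accepted; the DMAC is enforced and IP fragments are left to
 * software.
 */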
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
			parse_vxlan_attr(spec, f);
		} else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n",
				    be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

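/* Translate a flower match into an mlx5 flow spec. Matches on dissector
 * keys the HW cannot handle are rejected, header pointers are redirected
 * to the inner headers for decap flows, and the minimal inline mode the
 * match requires is reported via min_inline.
 */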
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			netdev_warn(priv->netdev,
				    "IPv6 tunnel decap offload isn't supported\n");
			/* fall through */
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flows the header pointers should point to the
		 * inner headers; the outer headers were already set by
		 * parse_tunnel_attr.
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

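/* Wrapper around __parse_cls_flower that also verifies, for VF rep
 * flows in offloads mode, that the eswitch inline mode satisfies what
 * the match requires.
 */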
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && esw->mode == SRIOV_OFFLOADS &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

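/* Parse TC actions for a NIC flow. A single action is supported per
 * rule: either gact drop (with a counter when the HW provides one) or
 * skbedit mark, whose value becomes the flow tag and must fit in the
 * 16-bit flow ID mask.
 */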
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

static inline int cmp_encap_info(struct mlx5_encap_info *a,
				 struct mlx5_encap_info *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct mlx5_encap_info *info)
{
	return jhash(info, sizeof(*info), 0);
}

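/* Resolve the IPv4 route and neighbour for an encap destination. Fails
 * when INET routing is not built in or when the egress device is not
 * on the same HW e-switch as this netdev.
 */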
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   __be32 *saddr,
				   int *out_ttl)
{
	struct rtable *rt;
	struct neighbour *n = NULL;
	int ttl;

#if IS_ENABLED(CONFIG_INET)
	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);
#else
	return -EOPNOTSUPP;
#endif

	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
		pr_warn("%s: can't offload, devices not on same HW e-switch\n",
			__func__);
		ip_rt_put(rt);
		return -EOPNOTSUPP;
	}

	ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	/* Read the output device before dropping the route reference;
	 * dereferencing rt after ip_rt_put() would be a use-after-free.
	 */
	*out_dev = rt->dst.dev;
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	*saddr = fl4->saddr;
	*out_ttl = ttl;

	return 0;
}

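/* Build a VXLAN-over-IPv4 encap header (eth + ip + udp + vxlan) into
 * buf and return its size. Fields not set explicitly below (ip tot_len
 * and checksum, udp source port and length) are left zeroed by the
 * memset.
 */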
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

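/* Resolve the route and neighbour for an encap entry, build its encap
 * header and register it with the device to obtain e->encap_id.
 * Offload is refused while the neighbour entry is not NUD_VALID.
 */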
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int encap_size;
	__be32 saddr = 0;
	int ttl = 0;
	int err;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = e->tun_info.tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.daddr = e->tun_info.daddr;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &saddr, &ttl);
	if (err)
		goto out;

	e->n = n;
	e->out_dev = *out_dev;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n",
			__func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   e->tun_info.daddr,
						   saddr, e->tun_info.tp_dst,
						   e->tun_info.tun_id);
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

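/* Attach a flow to an encap entry: reuse an existing entry from the
 * eswitch encap table when the tunnel key matches (hashed lookup),
 * otherwise create and register a new one. Only VXLAN over IPv4 with a
 * known UDP dst port and no fixed src port is supported.
 */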
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_info info;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	uintptr_t hash_key;
	bool found = false;
	int tunnel_type;
	int err;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		info.tp_dst = key->tp_dst;
		info.tun_id = tunnel_id_to_key32(key->tun_id);
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n",
			    be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	switch (family) {
	case AF_INET:
		info.daddr = key->u.ipv4.dst;
		break;
	case AF_INET6:
		netdev_warn(priv->netdev,
			    "IPv6 tunnel encap offload isn't supported\n");
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(&info);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info, &info)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}

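/* Parse TC actions for an FDB (eswitch offloads) flow: drop, mirred
 * redirect to a port on the same HW switch or through a tunnel encap,
 * vlan push/pop, and tunnel release (decap).
 */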
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev),
						     ifindex);
			/* guard against a stale ifindex from the mirred
			 * action; a NULL dev cannot be offloaded
			 */
			if (!out_dev)
				return -EINVAL;

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == VLAN_F_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

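/* Offload a flower classifier: build the match spec, parse the actions
 * for the NIC or FDB path, install the HW rule, and index the flow by
 * its TC cookie in the rhashtable.
 */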
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	bool fdb_flow = false;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		fdb_flow = true;

	if (fdb_flow)
		flow = kzalloc(sizeof(*flow) +
			       sizeof(struct mlx5_esw_flow_attr),
			       GFP_KERNEL);
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (fdb_flow) {
		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5_del_flow_rules(flow->rule);

err_free:
	kfree(flow);
out:
	kvfree(spec);
	return err;
}

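/* Remove an offloaded flower classifier, looked up by its TC cookie,
 * and tear down its HW state.
 */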
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

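/* Report the cached HW counter values (bytes, packets, last use) for
 * an offloaded flow back into its TC actions.
 */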
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}