net/mlx5e: Properly handle offloading of source udp port for IP tunnels
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

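/* One entry per offloaded TC flow, keyed in the rhashtable by the
 * cls_flower cookie. For e-switch (FDB) flows, a mlx5_esw_flow_attr is
 * allocated right behind this struct and 'attr' points at it; for NIC
 * flows 'attr' is left NULL.
 */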
struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
        struct mlx5_flow_handle *rule;
        struct list_head        encap; /* flows sharing the same encap */
        struct mlx5_esw_flow_attr *attr;
};

enum {
        MLX5_HEADER_TYPE_VXLAN = 0x0,
        MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

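/* Add a TC rule to the NIC RX flow table, creating the table lazily on
 * first use. Forwarded flows are steered back to the vlan table, while
 * dropped flows get a flow counter as their destination when one was
 * requested. The table is torn down from mlx5e_tc_del_flow once the
 * last filter is removed.
 */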
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5_flow_spec *spec,
                      u32 action, u32 flow_tag)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest = { 0 };
        struct mlx5_flow_act flow_act = {
                .action = action,
                .flow_tag = flow_tag,
                .encap_id = 0,
        };
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        bool table_created = false;

        if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = priv->fs.vlan.ft.t;
        } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);

                dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest.counter = counter;
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            MLX5E_TC_TABLE_NUM_ENTRIES,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            0, 0);
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        rule = ERR_CAST(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

        if (IS_ERR(rule))
                goto err_add_rule;

        return rule;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        mlx5_fc_destroy(dev, counter);

        return rule;
}

static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5_flow_spec *spec,
                      struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int err;

        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err)
                return ERR_PTR(err);

        return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

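/* Unlink a flow from its shared encap entry. The last flow to detach
 * releases the cached neighbour, frees the device encap_id and the
 * entry itself.
 */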
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow)
{
        struct list_head *next = flow->encap.next;

        list_del(&flow->encap);
        if (list_empty(next)) {
                struct mlx5_encap_entry *e;

                e = list_entry(next, struct mlx5_encap_entry, flows);
                if (e->n) {
                        mlx5_encap_dealloc(priv->mdev, e->encap_id);
                        neigh_release(e->n);
                }
                hlist_del_rcu(&e->encap_hlist);
                kfree(e);
        }
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_fc *counter = NULL;

        counter = mlx5_flow_rule_counter(flow->rule);

        mlx5_del_flow_rules(flow->rule);

        if (esw && esw->mode == SRIOV_OFFLOADS) {
                mlx5_eswitch_del_vlan_action(esw, flow->attr);
                if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                        mlx5e_detach_encap(priv, flow);
        }

        mlx5_fc_destroy(priv->mdev, counter);

        if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
}

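/* Fill in the VXLAN specific parts of a decap match: UDP as the outer
 * IP protocol and, when the filter gives one, the VNI taken from the
 * tunnel key id.
 */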
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->key);
                struct flow_dissector_key_keyid *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->mask);
                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(key->keyid));
        }
}

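/* Build the outer header match for a tunneled (decap) flow. The UDP
 * destination port must be fully masked and registered as a VXLAN port;
 * the UDP source port mask is copied as-is, so an unmasked source port
 * stays wildcarded rather than being forced to match.
 */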
static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->mask);

                /* Full udp dst port must be given */
                if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
                        return -EOPNOTSUPP;

                if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                        parse_vxlan_attr(spec, f);
                else
                        return -EOPNOTSUPP;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_dport, ntohs(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_dport, ntohs(key->dst));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_sport, ntohs(mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_sport, ntohs(key->src));
        } else { /* udp dst port must be given */
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->mask);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(key->src));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(key->dst));
        }

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);

        /* Enforce DMAC when offloading incoming tunneled flows.
         * Flow counters require a match on the DMAC.
         */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                     dmac_47_16), priv->netdev->dev_addr);

        /* let software handle IP fragments */
        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

        return 0;
}

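/* Translate a cls_flower match into mlx5 match criteria and values.
 * Also reports the minimal e-switch inline mode (L2/IP/TCP_UDP) the
 * match requires, for the caller to validate.
 */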
static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct mlx5_flow_spec *spec,
                              struct tc_cls_flower_offload *f,
                              u8 *min_inline)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        u16 addr_type = 0;
        u8 ip_proto = 0;

        *min_inline = MLX5_INLINE_MODE_L2;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if ((dissector_uses_key(f->dissector,
                                FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
            dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                                  f->key);
                switch (key->addr_type) {
                case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                        if (parse_tunnel_attr(priv, spec, f))
                                return -EOPNOTSUPP;
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                /* In decap flow, header pointers should point to the inner
                 * headers, outer headers were already set by parse_tunnel_attr
                 */
                headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                         inner_headers);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);

                struct flow_dissector_key_control *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->mask);
                addr_type = key->addr_type;

                if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
                                 key->flags & FLOW_DIS_IS_FRAGMENT);
                }
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                ip_proto = key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(key->n_proto));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         key->ip_proto);

                if (mask->ip_proto)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                key->src);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->key);
                struct flow_dissector_key_vlan *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->mask);
                if (mask->vlan_id || mask->vlan_priority) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
                }
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &key->src, sizeof(key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &key->dst, sizeof(key->dst));

                if (mask->src || mask->dst)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, sizeof(key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, sizeof(key->dst));

                if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
                    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transports are supported\n");
                        return -EINVAL;
                }

                if (mask->src || mask->dst)
                        *min_inline = MLX5_INLINE_MODE_TCP_UDP;
        }

        return 0;
}

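/* Parse the match and, for VF reps in offloads mode, refuse flows whose
 * match needs a deeper inline mode than the e-switch currently applies.
 */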
static int parse_cls_flower(struct mlx5e_priv *priv,
                            struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5_eswitch_rep *rep = priv->ppriv;
        u8 min_inline;
        int err;

        err = __parse_cls_flower(priv, spec, f, &min_inline);

        if (!err && esw && esw->mode == SRIOV_OFFLOADS &&
            rep->vport != FDB_UPLINK_VPORT) {
                if (min_inline > esw->offloads.inline_mode) {
                        netdev_warn(priv->netdev,
                                    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                                    min_inline, esw->offloads.inline_mode);
                        return -EOPNOTSUPP;
                }
        }

        return err;
}

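/* For NIC offload, exactly one action is supported per rule: either
 * gact drop (counted when the device has flow counters) or skbedit
 * mark, whose mark is carried as the flow tag and must fit in 16 bits.
 */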
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *flow_tag)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (tc_no_actions(exts))
                return -EINVAL;

        *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        *action = 0;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
                                               flow_table_properties_nic_receive.flow_counter))
                                *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);

                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
                                netdev_warn(priv->netdev, "Bad flow mark - only 16 bits are supported: 0x%x\n",
                                            mark);
                                return -EINVAL;
                        }

                        *flow_tag = mark;
                        *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }

        return 0;
}

static inline int cmp_encap_info(struct mlx5_encap_info *a,
                                 struct mlx5_encap_info *b)
{
        return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct mlx5_encap_info *info)
{
        return jhash(info, sizeof(*info), 0);
}

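/* Resolve the route and neighbour towards the tunnel destination. The
 * flow is only offloadable when the egress device sits on the same HW
 * e-switch as the ingress port.
 */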
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct flowi4 *fl4,
                                   struct neighbour **out_n,
                                   __be32 *saddr,
                                   int *out_ttl)
{
        struct rtable *rt;
        struct neighbour *n = NULL;
        int ttl;

#if IS_ENABLED(CONFIG_INET)
        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
        if (IS_ERR(rt)) {
                pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
                return -EOPNOTSUPP;
        }
#else
        return -EOPNOTSUPP;
#endif

        if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
                pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
                        __func__);
                ip_rt_put(rt);
                return -EOPNOTSUPP;
        }

        ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
        /* grab the egress device before dropping our route reference */
        *out_dev = rt->dst.dev;
        ip_rt_put(rt);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        *saddr = fl4->saddr;
        *out_ttl = ttl;

        return 0;
}

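/* Write a static ETH/IPv4/UDP/VXLAN encap header into buf and return
 * its size. Note that the UDP source port is left at zero here;
 * presumably the device supplies a source port (e.g. flow based
 * entropy) when it applies the header.
 */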
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
                                 char buf[],
                                 unsigned char h_dest[ETH_ALEN],
                                 int ttl,
                                 __be32 daddr,
                                 __be32 saddr,
                                 __be16 udp_dst_port,
                                 __be32 vx_vni)
{
        int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
        struct ethhdr *eth = (struct ethhdr *)buf;
        struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
        struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
        struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

        memset(buf, 0, encap_size);

        ether_addr_copy(eth->h_dest, h_dest);
        ether_addr_copy(eth->h_source, out_dev->dev_addr);
        eth->h_proto = htons(ETH_P_IP);

        ip->daddr = daddr;
        ip->saddr = saddr;

        ip->ttl = ttl;
        ip->protocol = IPPROTO_UDP;
        ip->version = 0x4;
        ip->ihl = 0x5;

        udp->dest = udp_dst_port;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(vx_vni);

        return encap_size;
}

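/* Resolve the tunnel route, snapshot the (valid) neighbour's MAC,
 * build the encap header and register it with the device to obtain
 * the encap_id used by encap rules.
 */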
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                                          struct net_device *mirred_dev,
                                          struct mlx5_encap_entry *e,
                                          struct net_device **out_dev)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct flowi4 fl4 = {};
        struct neighbour *n;
        char *encap_header;
        int encap_size;
        __be32 saddr;
        int ttl;
        int err;

        encap_header = kzalloc(max_encap_size, GFP_KERNEL);
        if (!encap_header)
                return -ENOMEM;

        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
                fl4.flowi4_proto = IPPROTO_UDP;
                fl4.fl4_dport = e->tun_info.tp_dst;
                break;
        default:
                err = -EOPNOTSUPP;
                goto out;
        }
        fl4.daddr = e->tun_info.daddr;

        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
                                      &fl4, &n, &saddr, &ttl);
        if (err)
                goto out;

        e->n = n;
        e->out_dev = *out_dev;

        if (!(n->nud_state & NUD_VALID)) {
                /* the neighbour isn't resolved yet, can't build the header */
                err = -EOPNOTSUPP;
                goto out;
        }

        neigh_ha_snapshot(e->h_dest, n, *out_dev);

        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
                encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
                                                   e->h_dest, ttl,
                                                   e->tun_info.daddr,
                                                   saddr, e->tun_info.tp_dst,
                                                   e->tun_info.tun_id);
                break;
        default:
                err = -EOPNOTSUPP;
                goto out;
        }

        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
                               encap_size, encap_header, &e->encap_id);
out:
        kfree(encap_header);
        return err;
}

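/* Find or create the shared encap entry for a tunnel_key set action.
 * Entries are hashed over the encap info (tunnel id, dst port, dst IP).
 * A fully specified UDP destination port is required, while setting the
 * UDP source port is rejected as unsupported.
 */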
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        unsigned short family = ip_tunnel_info_af(tun_info);
        struct ip_tunnel_key *key = &tun_info->key;
        struct mlx5_encap_info info;
        struct mlx5_encap_entry *e;
        struct net_device *out_dev;
        uintptr_t hash_key;
        bool found = false;
        int tunnel_type;
        int err;

        /* udp dst port must be given */
        if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
                return -EOPNOTSUPP;

        /* setting udp src port isn't supported */
        if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src)))
                return -EOPNOTSUPP;

        if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
                info.tp_dst = key->tp_dst;
                info.tun_id = tunnel_id_to_key32(key->tun_id);
                tunnel_type = MLX5_HEADER_TYPE_VXLAN;
        } else {
                return -EOPNOTSUPP;
        }

        switch (family) {
        case AF_INET:
                info.daddr = key->u.ipv4.dst;
                break;
        default:
                return -EOPNOTSUPP;
        }

        hash_key = hash_encap_info(&info);

        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
                if (!cmp_encap_info(&e->tun_info, &info)) {
                        found = true;
                        break;
                }
        }

        if (found) {
                attr->encap = e;
                return 0;
        }

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->tun_info = info;
        e->tunnel_type = tunnel_type;
        INIT_LIST_HEAD(&e->flows);

        err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
        if (err)
                goto out_err;

        attr->encap = e;
        hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

        return err;

out_err:
        kfree(e);
        return err;
}

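/* Translate TC actions into e-switch flow attributes: gact drop, mirred
 * redirect to a port on the same e-switch (or through a tunnel encap
 * when preceded by tunnel_key set), vlan push/pop, and tunnel_key unset
 * which maps to decap.
 */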
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow *flow)
{
        struct mlx5_esw_flow_attr *attr = flow->attr;
        struct ip_tunnel_info *info = NULL;
        const struct tc_action *a;
        LIST_HEAD(actions);
        bool encap = false;
        int err;

        if (tc_no_actions(exts))
                return -EINVAL;

        memset(attr, 0, sizeof(*attr));
        attr->in_rep = priv->ppriv;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a)) {
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev;
                        struct mlx5e_priv *out_priv;

                        out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
                        /* defensively guard against a stale mirred ifindex */
                        if (!out_dev)
                                return -EINVAL;

                        if (switchdev_port_same_parent_id(priv->netdev,
                                                          out_dev)) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                                out_priv = netdev_priv(out_dev);
                                attr->out_rep = out_priv->ppriv;
                        } else if (encap) {
                                err = mlx5e_attach_encap(priv, info,
                                                         out_dev, attr);
                                if (err)
                                        return err;
                                list_add(&flow->encap, &attr->encap->flows);
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                                out_priv = netdev_priv(attr->encap->out_dev);
                                attr->out_rep = out_priv->ppriv;
                        } else {
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
                                return -EINVAL;
                        }
                        continue;
                }

                if (is_tcf_tunnel_set(a)) {
                        info = tcf_tunnel_info(a);
                        if (info)
                                encap = true;
                        else
                                return -EOPNOTSUPP;
                        continue;
                }

                if (is_tcf_vlan(a)) {
                        if (tcf_vlan_action(a) == VLAN_F_POP) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
                        } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
                                if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
                                        return -EOPNOTSUPP;

                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
                                attr->vlan = tcf_vlan_push_vid(a);
                        }
                        continue;
                }

                if (is_tcf_tunnel_release(a)) {
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
                        continue;
                }

                return -EINVAL;
        }
        return 0;
}

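/* Entry point for cls_flower REPLACE. As a rough usage sketch, a filter
 * along these lines could reach the FDB encap path in switchdev mode
 * (device names and addresses are illustrative only):
 *
 *   tc filter add dev <vf_rep> protocol ip parent ffff: \
 *       flower skip_sw dst_ip 10.1.1.2 \
 *       action tunnel_key set id 100 \
 *           src_ip 10.0.0.1 dst_ip 10.0.0.2 dst_port 4789 \
 *       action mirred egress redirect dev <vxlan_dev>
 */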
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                           struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        int err = 0;
        bool fdb_flow = false;
        u32 flow_tag, action;
        struct mlx5e_tc_flow *flow;
        struct mlx5_flow_spec *spec;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        if (esw && esw->mode == SRIOV_OFFLOADS)
                fdb_flow = true;

        if (fdb_flow)
                flow = kzalloc(sizeof(*flow) +
                               sizeof(struct mlx5_esw_flow_attr),
                               GFP_KERNEL);
        else
                flow = kzalloc(sizeof(*flow), GFP_KERNEL);

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec || !flow) {
                err = -ENOMEM;
                goto err_free;
        }

        flow->cookie = f->cookie;

        err = parse_cls_flower(priv, spec, f);
        if (err < 0)
                goto err_free;

        if (fdb_flow) {
                flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
                err = parse_tc_fdb_actions(priv, f->exts, flow);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
        } else {
                err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
        }

        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
                goto err_free;
        }

        err = rhashtable_insert_fast(&tc->ht, &flow->node,
                                     tc->ht_params);
        if (err)
                goto err_del_rule;

        goto out;

err_del_rule:
        mlx5_del_flow_rules(flow->rule);

err_free:
        kfree(flow);
out:
        kvfree(spec);
        return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
                        struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_flow *flow;
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

        mlx5e_tc_del_flow(priv, flow);

        kfree(flow);

        return 0;
}

int mlx5e_stats_flower(struct mlx5e_priv *priv,
                       struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        struct mlx5e_tc_flow *flow;
        struct tc_action *a;
        struct mlx5_fc *counter;
        LIST_HEAD(actions);
        u64 bytes;
        u64 packets;
        u64 lastuse;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        counter = mlx5_flow_rule_counter(flow->rule);
        if (!counter)
                return 0;

        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

        tcf_exts_to_list(f->exts, &actions);
        list_for_each_entry(a, &actions, list)
                tcf_action_stats_update(a, bytes, packets, lastuse);

        return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
        .head_offset = offsetof(struct mlx5e_tc_flow, node),
        .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
        .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
        .automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        tc->ht_params = mlx5e_tc_flow_ht_params;
        return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
        struct mlx5e_tc_flow *flow = ptr;
        struct mlx5e_priv *priv = arg;

        mlx5e_tc_del_flow(priv, flow);
        kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

        if (!IS_ERR_OR_NULL(tc->t)) {
                mlx5_destroy_flow_table(tc->t);
                tc->t = NULL;
        }
}