/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */

#include <net/vxlan.h>
#include <net/gre.h>
#include "lib/vxlan.h"
#include "en/tc_tun.h"

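/* Resolve the route device and egress (out) device for an encap flow.
 * If the routed device is on a different eswitch than the source, or is
 * the LAG master above the uplink in SRIOV LAG mode, fall back to the
 * uplink. The egress device must end up being the uplink representor,
 * otherwise the encap cannot be offloaded and -EOPNOTSUPP is returned.
 */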
static int get_route_and_out_devs(struct mlx5e_priv *priv,
                                  struct net_device *dev,
                                  struct net_device **route_dev,
                                  struct net_device **out_dev)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct net_device *uplink_dev, *uplink_upper;
        bool dst_is_lag_dev;

        uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
        uplink_upper = netdev_master_upper_dev_get(uplink_dev);
        dst_is_lag_dev = (uplink_upper &&
                          netif_is_lag_master(uplink_upper) &&
                          dev == uplink_upper &&
                          mlx5_lag_is_sriov(priv->mdev));

        /* if the egress device isn't on the same HW e-switch or
         * it's a LAG device, use the uplink
         */
        if (!netdev_port_same_parent_id(priv->netdev, dev) ||
            dst_is_lag_dev) {
                *route_dev = uplink_dev;
                *out_dev = *route_dev;
        } else {
                *route_dev = dev;
                if (is_vlan_dev(*route_dev))
                        *out_dev = uplink_dev;
                else if (mlx5e_eswitch_rep(dev))
                        *out_dev = *route_dev;
                else
                        return -EOPNOTSUPP;
        }

        if (!(mlx5e_eswitch_rep(*out_dev) &&
              mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
                return -EOPNOTSUPP;

        return 0;
}

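/* Do a FIB lookup for the IPv4 tunnel destination, derive the route/out
 * devices and resolve the next-hop neighbour. On success the caller
 * holds a reference on *out_n and is responsible for releasing it with
 * neigh_release(). *out_ttl is only filled in if the caller passed 0.
 */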
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi4 *fl4,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct rtable *rt;
        struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
        struct mlx5_core_dev *mdev = priv->mdev;
        struct net_device *uplink_dev;
        int ret;

        if (mlx5_lag_is_multipath(mdev)) {
                struct mlx5_eswitch *esw = mdev->priv.eswitch;

                uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
                fl4->flowi4_oif = uplink_dev->ifindex;
        }

        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
        ret = PTR_ERR_OR_ZERO(rt);
        if (ret)
                return ret;

        if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }
#else
        return -EOPNOTSUPP;
#endif

        ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
        if (ret < 0) {
                ip_rt_put(rt);
                return ret;
        }

        if (!(*out_ttl))
                *out_ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
        ip_rt_put(rt);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

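/* Return the rtnl link "kind" string of a netdev (e.g. "vxlan" or
 * "gretap"), or "unknown" for devices without rtnl_link_ops.
 */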
static const char *mlx5e_netdev_kind(struct net_device *dev)
{
        if (dev->rtnl_link_ops)
                return dev->rtnl_link_ops->kind;
        else
                return "unknown";
}

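/* IPv6 counterpart of mlx5e_route_lookup_ipv4(): look up the dst entry
 * via the ipv6 stub, derive the route/out devices and resolve the
 * next-hop neighbour. The caller must neigh_release() *out_n.
 */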
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi6 *fl6,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct neighbour *n = NULL;
        struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
        int ret;

        ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
                                         fl6);
        if (ret < 0)
                return ret;

        if (!(*out_ttl))
                *out_ttl = ip6_dst_hoplimit(dst);

        ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
        if (ret < 0) {
                dst_release(dst);
                return ret;
        }
#else
        return -EOPNOTSUPP;
#endif

        n = dst_neigh_lookup(dst, &fl6->daddr);
        dst_release(dst);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

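/* Write the UDP + VXLAN part of the encap header into buf. Only the UDP
 * dport and the VNI are set; the remaining UDP fields stay zero and are
 * expected to be filled in by the HW at transmit time.
 */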
static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        struct udphdr *udp = (struct udphdr *)(buf);
        struct vxlanhdr *vxh = (struct vxlanhdr *)
                               ((char *)udp + sizeof(struct udphdr));

        udp->dest = tun_key->tp_dst;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(tun_id);

        return 0;
}

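/* Write the GRE part of the encap header into buf. TUNNEL_CSUM and
 * TUNNEL_SEQ are rejected since the HW cannot generate them; when
 * TUNNEL_KEY is set, the key is placed in the last 32-bit word of the
 * GRE header.
 */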
static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        int hdr_len;
        struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);

        /* the HW does not calculate GRE csum or sequences */
        if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
                return -EOPNOTSUPP;

        greh->protocol = htons(ETH_P_TEB);

        /* GRE key */
        hdr_len = gre_calc_hlen(tun_key->tun_flags);
        greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
        if (tun_key->tun_flags & TUNNEL_KEY) {
                __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

                *ptr = tun_id;
        }

        return 0;
}

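/* Dispatch to the per-tunnel-type header builder and report in ip_proto
 * the protocol number (UDP for VXLAN, GRE for gretap) the outer IP
 * header should carry.
 */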
static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
                                      struct mlx5e_encap_entry *e)
{
        int err = 0;
        struct ip_tunnel_key *key = &e->tun_info.key;

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                *ip_proto = IPPROTO_UDP;
                err = mlx5e_gen_vxlan_header(buf, key);
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                *ip_proto = IPPROTO_GRE;
                err = mlx5e_gen_gre_header(buf, key);
        } else {
                pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n",
                        e->tunnel_type);
                err = -EOPNOTSUPP;
        }

        return err;
}

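/* Write the outer Ethernet header (plus an 802.1Q tag when the route
 * device is a VLAN device) into buf, and return a pointer to where the
 * outer IP header begins.
 */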
static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev,
                             struct mlx5e_encap_entry *e,
                             u16 proto)
{
        struct ethhdr *eth = (struct ethhdr *)buf;
        char *ip;

        ether_addr_copy(eth->h_dest, e->h_dest);
        ether_addr_copy(eth->h_source, dev->dev_addr);
        if (is_vlan_dev(dev)) {
                struct vlan_hdr *vlan = (struct vlan_hdr *)
                                        ((char *)eth + ETH_HLEN);
                ip = (char *)vlan + VLAN_HLEN;
                eth->h_proto = vlan_dev_vlan_proto(dev);
                vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev));
                vlan->h_vlan_encapsulated_proto = htons(proto);
        } else {
                eth->h_proto = htons(proto);
                ip = (char *)eth + ETH_HLEN;
        }

        return ip;
}

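/* Build the full IPv4 encap header for an encap entry: route and
 * neighbour lookup, ETH + IPv4 + tunnel headers, then the FW packet
 * reformat object. If the neighbour is not yet valid, a neigh event is
 * kicked off and the entry is completed later from the neigh update
 * path; until then it is not marked MLX5_ENCAP_ENTRY_VALID.
 */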
int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
        int ipv4_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        struct iphdr *ip;
        int err;

        /* add the IP fields */
        fl4.flowi4_tos = tun_key->tos;
        fl4.daddr = tun_key->u.ipv4.dst;
        fl4.saddr = tun_key->u.ipv4.src;
        ttl = tun_key->ttl;

        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl4, &n, &ttl);
        if (err)
                return err;

        ipv4_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct iphdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv4_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv4_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out;
        }

        encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to lookup a neigh hash table
         * entry in the neigh hash table when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
        e->route_dev = route_dev;

        /* It's important to add the neigh to the hash table before checking
         * the neigh validity state. That way, if we get a notification when
         * the neigh changes its validity state, we find the relevant neigh
         * in the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                             ETH_P_IP);

        /* add ip header */
        ip->tos = tun_key->tos;
        ip->version = 0x4;
        ip->ihl = 0x5;
        ip->ttl = ttl;
        ip->daddr = fl4.daddr;
        ip->saddr = fl4.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
                                         &ip->protocol, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv4_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                /* the encap entry will be made valid on neigh update event
                 * and not used before that.
                 */
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv4_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

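/* IPv6 counterpart of mlx5e_tc_tun_create_header_ipv4(): build the
 * ETH + IPv6 + tunnel encap header and allocate the FW packet reformat
 * object once the neighbour is valid.
 */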
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi6 fl6 = {};
        struct ipv6hdr *ip6h;
        int ipv6_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        int err;

        ttl = tun_key->ttl;

        fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
        fl6.daddr = tun_key->u.ipv6.dst;
        fl6.saddr = tun_key->u.ipv6.src;

        err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl6, &n, &ttl);
        if (err)
                return err;

        ipv6_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct ipv6hdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv6_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv6_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out;
        }

        encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to lookup a neigh hash table
         * entry in the neigh hash table when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
        e->route_dev = route_dev;

        /* It's important to add the neigh to the hash table before checking
         * the neigh validity state. That way, if we get a notification when
         * the neigh changes its validity state, we find the relevant neigh
         * in the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                                 ETH_P_IPV6);

        /* add ip header */
        ip6_flow_hdr(ip6h, tun_key->tos, 0);
        /* the HW fills in the ipv6 payload len */
        ip6h->hop_limit   = ttl;
        ip6h->daddr       = fl6.daddr;
        ip6h->saddr       = fl6.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
                                         &ip6h->nexthdr, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv6_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                /* the encap entry will be made valid on neigh update event
                 * and not used before that.
                 */
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv6_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

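/* Classify a tunnel netdev into the mlx5e tunnel types this driver can
 * offload (VXLAN, gretap/ip6gretap) or MLX5E_TC_TUNNEL_TYPE_UNKNOWN.
 */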
int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev)
{
        if (netif_is_vxlan(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_VXLAN;
        else if (netif_is_gretap(tunnel_dev) ||
                 netif_is_ip6gretap(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_GRETAP;
        else
                return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
}

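/* Return true if the given tunnel netdev type is supported and the FW
 * advertises the matching encap/decap capability.
 */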
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
                                    struct net_device *netdev)
{
        int tunnel_type = mlx5e_tc_tun_get_type(netdev);

        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                return true;
        else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP &&
                 MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap))
                return true;
        else
                return false;
}

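/* Set the reformat type and tunnel header length on an encap entry
 * based on the tunnel device type. For VXLAN, the destination UDP port
 * must already be registered with the HW.
 */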
int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
                                 struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e,
                                 struct netlink_ext_ack *extack)
{
        e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);

                if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "vxlan udp dport was not registered with the HW");
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n",
                                    dst_port);
                        return -EOPNOTSUPP;
                }
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
                e->tunnel_hlen = VXLAN_HLEN;
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
                e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags);
        } else {
                e->reformat_type = -1;
                e->tunnel_hlen = -1;
                return -EOPNOTSUPP;
        }
        return 0;
}

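/* Parse a flower VXLAN decap match into the mlx5 flow spec: require an
 * exact match on a HW-registered UDP dport, then match the UDP ports
 * and, if given, the VNI.
 */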
static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
                                    struct mlx5_flow_spec *spec,
                                    struct tc_cls_flower_offload *f,
                                    void *headers_c,
                                    void *headers_v)
{
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
        struct netlink_ext_ack *extack = f->common.extack;
        void *misc_c = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_value,
                                    misc_parameters);
        struct flow_match_ports enc_ports;

        flow_rule_match_enc_ports(rule, &enc_ports);

        /* Full udp dst port must be given */
        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
            memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "VXLAN decap filter must include enc_dst_port condition");
                netdev_warn(priv->netdev,
                            "VXLAN decap filter must include enc_dst_port condition\n");
                return -EOPNOTSUPP;
        }

        /* udp dst port must be known as a VXLAN port */
        if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(enc_ports.key->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Matched UDP port is not registered as a VXLAN port");
                netdev_warn(priv->netdev,
                            "UDP port %d is not registered as a VXLAN port\n",
                            be16_to_cpu(enc_ports.key->dst));
                return -EOPNOTSUPP;
        }

        /* dst UDP port is valid here */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
                 ntohs(enc_ports.mask->dst));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                 ntohs(enc_ports.key->dst));

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
                 ntohs(enc_ports.mask->src));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
                 ntohs(enc_ports.key->src));

        /* match on VNI */
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid enc_keyid;

                flow_rule_match_enc_keyid(rule, &enc_keyid);

                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(enc_keyid.mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(enc_keyid.key->keyid));
        }
        return 0;
}

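/* Parse a flower gretap decap match into the mlx5 flow spec: match on
 * IPPROTO_GRE and the TEB GRE protocol, plus the GRE key when the
 * filter supplies one.
 */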
static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
                                     struct mlx5_flow_spec *spec,
                                     struct tc_cls_flower_offload *f,
                                     void *outer_headers_c,
                                     void *outer_headers_v)
{
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

        if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
                NL_SET_ERR_MSG_MOD(f->common.extack,
                                   "GRE HW offloading is not supported");
                netdev_warn(priv->netdev, "GRE HW offloading is not supported\n");
                return -EOPNOTSUPP;
        }

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                 ip_protocol, IPPROTO_GRE);

        /* gre protocol */
        MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);

        /* gre key */
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid enc_keyid;

                flow_rule_match_enc_keyid(rule, &enc_keyid);
                MLX5_SET(fte_match_set_misc, misc_c,
                         gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v,
                         gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
        }

        return 0;
}

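/* Parse the tunnel portion of a flower match by dispatching to the
 * tunnel-type specific parser, and report the outer match level the
 * rule requires (L4 for VXLAN, L3 for GRE).
 */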
int mlx5e_tc_tun_parse(struct net_device *filter_dev,
                       struct mlx5e_priv *priv,
                       struct mlx5_flow_spec *spec,
                       struct tc_cls_flower_offload *f,
                       void *headers_c,
                       void *headers_v, u8 *match_level)
{
        int tunnel_type;
        int err = 0;

        tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                *match_level = MLX5_MATCH_L4;
                err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
                                               headers_c, headers_v);
        } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                *match_level = MLX5_MATCH_L3;
                err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
                                                headers_c, headers_v);
        } else {
                netdev_warn(priv->netdev,
                            "decapsulation offload is not supported for %s (kind: \"%s\")\n",
                            netdev_name(filter_dev),
                            mlx5e_netdev_kind(filter_dev));

                return -EOPNOTSUPP;
        }
        return err;
}