/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */

#include <net/vxlan.h>
#include <net/gre.h>
#include "lib/vxlan.h"
#include "en/tc_tun.h"

static int get_route_and_out_devs(struct mlx5e_priv *priv,
                                  struct net_device *dev,
                                  struct net_device **route_dev,
                                  struct net_device **out_dev)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct net_device *uplink_dev, *uplink_upper;
        bool dst_is_lag_dev;

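        /* If SR-IOV LAG is active and the uplink is enslaved to a bond, a
         * destination equal to that bond must be treated as a LAG device.
         */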
        uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
        uplink_upper = netdev_master_upper_dev_get(uplink_dev);
        dst_is_lag_dev = (uplink_upper &&
                          netif_is_lag_master(uplink_upper) &&
                          dev == uplink_upper &&
                          mlx5_lag_is_sriov(priv->mdev));

        /* if the egress device isn't on the same HW e-switch or
         * it's a LAG device, use the uplink
         */
        if (!netdev_port_same_parent_id(priv->netdev, dev) ||
            dst_is_lag_dev) {
                *route_dev = uplink_dev;
                *out_dev = *route_dev;
        } else {
                *route_dev = dev;
                if (is_vlan_dev(*route_dev))
                        *out_dev = uplink_dev;
                else if (mlx5e_eswitch_rep(dev))
                        *out_dev = *route_dev;
                else
                        return -EOPNOTSUPP;
        }

        return 0;
}

static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi4 *fl4,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct rtable *rt;
        struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
        struct mlx5_core_dev *mdev = priv->mdev;
        struct net_device *uplink_dev;
        int ret;

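        /* In multipath (VF LAG) mode, pin the FIB lookup to the uplink
         * netdevice, so the route is resolved over the LAG.
         */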
        if (mlx5_lag_is_multipath(mdev)) {
                struct mlx5_eswitch *esw = mdev->priv.eswitch;

                uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
                fl4->flowi4_oif = uplink_dev->ifindex;
        }

        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
        ret = PTR_ERR_OR_ZERO(rt);
        if (ret)
                return ret;

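        /* Multipath offload requires the resolved route to have a gateway;
         * directly connected routes are rejected.
         */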
        if (mlx5_lag_is_multipath(mdev) && !rt->rt_gateway) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }
#else
        return -EOPNOTSUPP;
#endif

        ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
        if (ret < 0) {
                ip_rt_put(rt);
                return ret;
        }

        if (!(*out_ttl))
                *out_ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
        ip_rt_put(rt);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

static const char *mlx5e_netdev_kind(struct net_device *dev)
{
        if (dev->rtnl_link_ops)
                return dev->rtnl_link_ops->kind;
        else
                return "";
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi6 *fl6,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct neighbour *n = NULL;
        struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
        int ret;

        ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
                                         fl6);
        if (ret < 0)
                return ret;

        if (!(*out_ttl))
                *out_ttl = ip6_dst_hoplimit(dst);

        ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
        if (ret < 0) {
                dst_release(dst);
                return ret;
        }
#else
        return -EOPNOTSUPP;
#endif

        n = dst_neigh_lookup(dst, &fl6->daddr);
        dst_release(dst);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        struct udphdr *udp = (struct udphdr *)(buf);
        struct vxlanhdr *vxh = (struct vxlanhdr *)
                               ((char *)udp + sizeof(struct udphdr));

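        /* Only the UDP destination port and the VNI are filled in; the rest
         * of the encap template stays zero (the caller allocates the buffer
         * with kzalloc()).
         */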
        udp->dest = tun_key->tp_dst;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(tun_id);

        return 0;
}

static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        int hdr_len;
        struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);

        /* the HW does not calculate GRE checksums or sequence numbers */
        if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
                return -EOPNOTSUPP;

        greh->protocol = htons(ETH_P_TEB);

        /* GRE key */
        hdr_len = gre_calc_hlen(tun_key->tun_flags);
        greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
        if (tun_key->tun_flags & TUNNEL_KEY) {
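                /* gre_calc_hlen() accounts for the key, and the checksum and
                 * sequence flags were rejected above, so the key occupies
                 * the last 4 bytes of the header.
                 */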
                __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

                *ptr = tun_id;
        }

        return 0;
}

static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
                                      struct mlx5e_encap_entry *e)
{
        struct ip_tunnel_key *key = &e->tun_info.key;
        int err = 0;

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                *ip_proto = IPPROTO_UDP;
                err = mlx5e_gen_vxlan_header(buf, key);
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                *ip_proto = IPPROTO_GRE;
                err = mlx5e_gen_gre_header(buf, key);
        } else {
                pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n",
                        e->tunnel_type);
                err = -EOPNOTSUPP;
        }

        return err;
}

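/* Build the outer Ethernet header (plus a VLAN tag when the route device is
 * a VLAN device) and return a pointer to where the IP header begins.
 */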
static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev,
                             struct mlx5e_encap_entry *e,
                             u16 proto)
{
        struct ethhdr *eth = (struct ethhdr *)buf;
        char *ip;

        ether_addr_copy(eth->h_dest, e->h_dest);
        ether_addr_copy(eth->h_source, dev->dev_addr);
        if (is_vlan_dev(dev)) {
                struct vlan_hdr *vlan = (struct vlan_hdr *)
                                        ((char *)eth + ETH_HLEN);
                ip = (char *)vlan + VLAN_HLEN;
                eth->h_proto = vlan_dev_vlan_proto(dev);
                vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev));
                vlan->h_vlan_encapsulated_proto = htons(proto);
        } else {
                eth->h_proto = htons(proto);
                ip = (char *)eth + ETH_HLEN;
        }

        return ip;
}

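/* Resolve the IPv4 route and neighbour towards the tunnel destination, build
 * the full encap header (Ethernet + IPv4 + tunnel) and, once the neighbour is
 * valid, register the header with the FW as a packet-reformat action.
 */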
int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
        int ipv4_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        struct iphdr *ip;
        int err;

        /* add the IP fields */
        fl4.flowi4_tos = tun_key->tos;
        fl4.daddr = tun_key->u.ipv4.dst;
        fl4.saddr = tun_key->u.ipv4.src;
        ttl = tun_key->ttl;

        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl4, &n, &ttl);
        if (err)
                return err;

        ipv4_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct iphdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv4_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv4_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out;
        }

        encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to lookup the neigh entry in the
         * hash table when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
        e->route_dev = route_dev;

        /* It's important to add the neigh to the hash table before checking
         * its validity state. That way, if we get a notification that the
         * neigh changed its validity state, we will find the relevant neigh
         * in the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

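        /* Snapshot the neighbour's state and MAC address under its lock. */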
        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                             ETH_P_IP);

        /* add ip header */
        ip->tos = tun_key->tos;
        ip->version = 0x4;
        ip->ihl = 0x5;
        ip->ttl = ttl;
        ip->daddr = fl4.daddr;
        ip->saddr = fl4.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
                                         &ip->protocol, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv4_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                /* the encap entry will be made valid on neigh update event
                 * and not used before that.
                 */
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv4_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

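/* IPv6 counterpart of mlx5e_tc_tun_create_header_ipv4(). */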
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi6 fl6 = {};
        struct ipv6hdr *ip6h;
        int ipv6_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        int err;

        ttl = tun_key->ttl;

        fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
        fl6.daddr = tun_key->u.ipv6.dst;
        fl6.saddr = tun_key->u.ipv6.src;

        err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl6, &n, &ttl);
        if (err)
                return err;

        ipv6_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct ipv6hdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv6_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv6_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out;
        }

        encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to lookup the neigh entry in the
         * hash table when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
        e->route_dev = route_dev;

        /* It's important to add the neigh to the hash table before checking
         * its validity state. That way, if we get a notification that the
         * neigh changed its validity state, we will find the relevant neigh
         * in the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                                 ETH_P_IPV6);

        /* add ip header */
        ip6_flow_hdr(ip6h, tun_key->tos, 0);
        /* the HW fills in the IPv6 payload length */
        ip6h->hop_limit   = ttl;
        ip6h->daddr       = fl6.daddr;
        ip6h->saddr       = fl6.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
                                         &ip6h->nexthdr, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv6_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                /* the encap entry will be made valid on neigh update event
                 * and not used before that.
                 */
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv6_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev)
{
        if (netif_is_vxlan(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_VXLAN;
        else if (netif_is_gretap(tunnel_dev) ||
                 netif_is_ip6gretap(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_GRETAP;
        else
                return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
}

bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
                                    struct net_device *netdev)
{
        int tunnel_type = mlx5e_tc_tun_get_type(netdev);

        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                return true;
        else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP &&
                 MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap))
                return true;
        else
                return false;
}

int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
                                 struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e,
                                 struct netlink_ext_ack *extack)
{
        e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);

                if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "vxlan udp dport was not registered with the HW");
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n",
                                    dst_port);
                        return -EOPNOTSUPP;
                }
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
                e->tunnel_hlen = VXLAN_HLEN;
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
                e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags);
        } else {
                e->reformat_type = -1;
                e->tunnel_hlen = -1;
                return -EOPNOTSUPP;
        }
        return 0;
}

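/* Set up matching on the outer UDP header and the VXLAN VNI based on the
 * flower rule. The rule must match the full UDP destination port, and that
 * port must be registered with the HW as a VXLAN port.
 */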
static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
                                    struct mlx5_flow_spec *spec,
                                    struct tc_cls_flower_offload *f,
                                    void *headers_c,
                                    void *headers_v)
{
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
        struct netlink_ext_ack *extack = f->common.extack;
        void *misc_c = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_value,
                                    misc_parameters);
        struct flow_match_ports enc_ports;

        flow_rule_match_enc_ports(rule, &enc_ports);

        /* Full udp dst port must be given */
        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
            memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "VXLAN decap filter must include enc_dst_port condition");
                netdev_warn(priv->netdev,
                            "VXLAN decap filter must include enc_dst_port condition\n");
                return -EOPNOTSUPP;
        }

        /* udp dst port must be known as a VXLAN port */
        if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(enc_ports.key->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Matched UDP port is not registered as a VXLAN port");
                netdev_warn(priv->netdev,
                            "UDP port %d is not registered as a VXLAN port\n",
                            be16_to_cpu(enc_ports.key->dst));
                return -EOPNOTSUPP;
        }

        /* dst UDP port is valid here */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
                 ntohs(enc_ports.mask->dst));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                 ntohs(enc_ports.key->dst));

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
                 ntohs(enc_ports.mask->src));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
                 ntohs(enc_ports.key->src));

        /* match on VNI */
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid enc_keyid;

                flow_rule_match_enc_keyid(rule, &enc_keyid);

                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(enc_keyid.mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(enc_keyid.key->keyid));
        }
        return 0;
}

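/* Set up matching on the outer IP protocol, the GRE protocol field and,
 * optionally, the GRE key based on the flower rule.
 */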
static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
                                     struct mlx5_flow_spec *spec,
                                     struct tc_cls_flower_offload *f,
                                     void *outer_headers_c,
                                     void *outer_headers_v)
{
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

        if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
                NL_SET_ERR_MSG_MOD(f->common.extack,
                                   "GRE HW offloading is not supported");
                netdev_warn(priv->netdev, "GRE HW offloading is not supported\n");
                return -EOPNOTSUPP;
        }

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                 ip_protocol, IPPROTO_GRE);

        /* GRE protocol */
        MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);

        /* GRE key */
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid enc_keyid;

                flow_rule_match_enc_keyid(rule, &enc_keyid);
                MLX5_SET(fte_match_set_misc, misc_c,
                         gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v,
                         gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
        }

        return 0;
}

int mlx5e_tc_tun_parse(struct net_device *filter_dev,
                       struct mlx5e_priv *priv,
                       struct mlx5_flow_spec *spec,
                       struct tc_cls_flower_offload *f,
                       void *headers_c,
                       void *headers_v, u8 *match_level)
{
        int tunnel_type;
        int err = 0;

        tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                *match_level = MLX5_MATCH_L4;
                err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
                                               headers_c, headers_v);
        } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                *match_level = MLX5_MATCH_L3;
                err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
                                                headers_c, headers_v);
        } else {
                netdev_warn(priv->netdev,
                            "decapsulation offload is not supported for %s net device (%d)\n",
                            mlx5e_netdev_kind(filter_dev), tunnel_type);
                return -EOPNOTSUPP;
        }
        return err;
}