[linux.git] drivers/net/ethernet/netronome/nfp/flower/action.c
(blob at commit "nfp: flower: add ipv4 set ttl and tos offload")
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM                      cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY                       cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT                cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS      IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS     (NFP_FL_TUNNEL_CSUM | \
                                                 NFP_FL_TUNNEL_KEY | \
                                                 NFP_FL_TUNNEL_GENEVE_OPT)
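
/* Illustrative note (not in the original source): a tunnel_key set action
 * whose flags are e.g. TUNNEL_KEY | TUNNEL_CSUM falls within
 * NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS and can be offloaded, while one
 * carrying an unsupported flag such as TUNNEL_SEQ is rejected with
 * -EOPNOTSUPP in nfp_fl_set_ipv4_udp_tun().
 */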

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
        size_t act_size = sizeof(struct nfp_fl_pop_vlan);

        pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
        pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
                 const struct tc_action *action)
{
        size_t act_size = sizeof(struct nfp_fl_push_vlan);
        u16 tmp_push_vlan_tci;

        push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
        push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        push_vlan->reserved = 0;
        push_vlan->vlan_tpid = tcf_vlan_push_proto(action);

        tmp_push_vlan_tci =
                FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
                FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
                NFP_FL_PUSH_VLAN_CFI;
        push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
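
/* Worked example (illustrative, assuming the standard 802.1Q TCI layout of
 * the NFP_FL_PUSH_VLAN_* fields): for "action vlan push id 100 priority 2"
 * the TCI built above is PCP(3) | CFI(1) | VID(12):
 *   FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, 2)  -> 0x4000
 *   NFP_FL_PUSH_VLAN_CFI                  -> 0x1000
 *   FIELD_PREP(NFP_FL_PUSH_VLAN_VID, 100) -> 0x0064
 * giving vlan_tci = cpu_to_be16(0x5064).
 */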

static int
nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
               struct nfp_fl_payload *nfp_flow, int act_len)
{
        size_t act_size = sizeof(struct nfp_fl_pre_lag);
        struct nfp_fl_pre_lag *pre_lag;
        struct net_device *out_dev;
        int err;

        out_dev = tcf_mirred_dev(action);
        if (!out_dev || !netif_is_lag_master(out_dev))
                return 0;

        if (act_len + act_size > NFP_FL_MAX_A_SIZ)
                return -EOPNOTSUPP;

        /* Pre_lag action must be first on the action list.
         * If other actions already exist they need to be pushed forward.
         */
        if (act_len)
                memmove(nfp_flow->action_data + act_size,
                        nfp_flow->action_data, act_len);

        pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
        err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
        if (err)
                return err;

        pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
        pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

        return act_size;
}
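
/* Sketch (illustrative) of the memmove in nfp_fl_pre_lag() above: with one
 * output action already serialised, the action buffer goes from
 *   [output][free space]
 * to
 *   [pre_lag][output][free space]
 * i.e. existing actions are shifted up by sizeof(struct nfp_fl_pre_lag) so
 * the pre_lag action occupies the mandatory first slot.
 */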

static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
                                         enum nfp_flower_tun_type tun_type)
{
        if (!out_dev->rtnl_link_ops)
                return false;

        if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
                return tun_type == NFP_FL_TUNNEL_VXLAN;

        if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
                return tun_type == NFP_FL_TUNNEL_GENEVE;

        return false;
}

static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
              const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
              bool last, struct net_device *in_dev,
              enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
        size_t act_size = sizeof(struct nfp_fl_output);
        struct nfp_flower_priv *priv = app->priv;
        struct net_device *out_dev;
        u16 tmp_flags;

        output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
        output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        out_dev = tcf_mirred_dev(action);
        if (!out_dev)
                return -EOPNOTSUPP;

        tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

        if (tun_type) {
                /* Verify the egress netdev matches the tunnel type. */
                if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
                        return -EOPNOTSUPP;

                if (*tun_out_cnt)
                        return -EOPNOTSUPP;
                (*tun_out_cnt)++;

                output->flags = cpu_to_be16(tmp_flags |
                                            NFP_FL_OUT_FLAGS_USE_TUN);
                output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        } else if (netif_is_lag_master(out_dev) &&
                   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
                int gid;

                output->flags = cpu_to_be16(tmp_flags);
                gid = nfp_flower_lag_get_output_id(app, out_dev);
                if (gid < 0)
                        return gid;
                output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
        } else {
                /* Set action output parameters. */
                output->flags = cpu_to_be16(tmp_flags);

                /* Only offload if egress ports are on the same device as the
                 * ingress port.
                 */
                if (!switchdev_port_same_parent_id(in_dev, out_dev))
                        return -EOPNOTSUPP;
                if (!nfp_netdev_is_nfp_repr(out_dev))
                        return -EOPNOTSUPP;

                output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
                if (!output->port)
                        return -EOPNOTSUPP;
        }
        nfp_flow->meta.shortcut = output->port;

        return 0;
}

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
                                const struct tc_action *action)
{
        struct ip_tunnel_info *tun = tcf_tunnel_info(action);
        struct nfp_flower_priv *priv = app->priv;

        switch (tun->key.tp_dst) {
        case htons(NFP_FL_VXLAN_PORT):
                return NFP_FL_TUNNEL_VXLAN;
        case htons(NFP_FL_GENEVE_PORT):
                if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
                        return NFP_FL_TUNNEL_GENEVE;
                /* FALLTHROUGH */
        default:
                return NFP_FL_TUNNEL_NONE;
        }
}
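
/* Example (illustrative): "action tunnel_key set ... dst_port 4789"
 * (the IANA VXLAN port) maps to NFP_FL_TUNNEL_VXLAN, dst_port 6081 maps to
 * NFP_FL_TUNNEL_GENEVE when the firmware advertises NFP_FL_FEATS_GENEVE,
 * and any other destination port yields NFP_FL_TUNNEL_NONE so the flow is
 * not offloaded.
 */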

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
        size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
        struct nfp_fl_pre_tunnel *pre_tun_act;

        /* Pre_tunnel action must be first on the action list.
         * If other actions already exist they need to be pushed forward.
         */
        if (act_len)
                memmove(act_data + act_size, act_data, act_len);

        pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

        memset(pre_tun_act, 0, act_size);

        pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
        pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        return pre_tun_act;
}

static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
                           const struct tc_action *action)
{
        struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
        int opt_len, opt_cnt, act_start, tot_push_len;
        u8 *src = ip_tunnel_info_opts(ip_tun);

        /* We need to populate the options in reverse order for HW.
         * Therefore we go through the options, calculating the
         * number of options and the total size, then we populate
         * them in reverse order in the action list.
         */
        opt_cnt = 0;
        tot_push_len = 0;
        opt_len = ip_tun->options_len;
        while (opt_len > 0) {
                struct geneve_opt *opt = (struct geneve_opt *)src;

                opt_cnt++;
                if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
                        return -EOPNOTSUPP;

                tot_push_len += sizeof(struct nfp_fl_push_geneve) +
                                opt->length * 4;
                if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
                        return -EOPNOTSUPP;

                opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
                src += sizeof(struct geneve_opt) + opt->length * 4;
        }

        if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
                return -EOPNOTSUPP;

        act_start = *list_len;
        *list_len += tot_push_len;
        src = ip_tunnel_info_opts(ip_tun);
        while (opt_cnt) {
                struct geneve_opt *opt = (struct geneve_opt *)src;
                struct nfp_fl_push_geneve *push;
                size_t act_size, len;

                opt_cnt--;
                act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
                tot_push_len -= act_size;
                len = act_start + tot_push_len;

                push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
                push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
                push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
                push->reserved = 0;
                push->class = opt->opt_class;
                push->type = opt->type;
                push->length = opt->length;
                memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

                src += sizeof(struct geneve_opt) + opt->length * 4;
        }

        return 0;
}
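
/* Worked example (illustrative) of the reverse-order layout above: for two
 * geneve TLVs parsed in order A (opt->length = 1, i.e. 4 data bytes) then
 * B (opt->length = 2), the second loop writes B at act_start and A directly
 * after it, so the action list carries [push B][push A] while tot_push_len
 * counts down to exactly 0.
 */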

static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
                        struct nfp_fl_set_ipv4_udp_tun *set_tun,
                        const struct tc_action *action,
                        struct nfp_fl_pre_tunnel *pre_tun,
                        enum nfp_flower_tun_type tun_type,
                        struct net_device *netdev)
{
        size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
        struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
        struct nfp_flower_priv *priv = app->priv;
        u32 tmp_set_ip_tun_type_index = 0;
        /* Currently support one pre-tunnel so index is always 0. */
        int pretun_idx = 0;

        BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
                     NFP_FL_TUNNEL_KEY  != TUNNEL_KEY ||
                     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
        if (ip_tun->options_len &&
            (tun_type != NFP_FL_TUNNEL_GENEVE ||
            !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
                return -EOPNOTSUPP;

        set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
        set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        /* Set tunnel type and pre-tunnel index. */
        tmp_set_ip_tun_type_index |=
                FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
                FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

        set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
        set_tun->tun_id = ip_tun->key.tun_id;

        if (ip_tun->key.ttl) {
                set_tun->ttl = ip_tun->key.ttl;
        } else {
                struct net *net = dev_net(netdev);
                struct flowi4 flow = {};
                struct rtable *rt;
                int err;

                /* Do a route lookup to determine the ttl - if it fails then
                 * use the default. Note that CONFIG_INET is a requirement of
                 * CONFIG_NET_SWITCHDEV so it must be defined here.
                 */
                flow.daddr = ip_tun->key.u.ipv4.dst;
                flow.flowi4_proto = IPPROTO_UDP;
                rt = ip_route_output_key(net, &flow);
                err = PTR_ERR_OR_ZERO(rt);
                if (!err) {
                        set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
                        ip_rt_put(rt);
                } else {
                        set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
                }
        }

        set_tun->tos = ip_tun->key.tos;

        if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
            ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
                return -EOPNOTSUPP;
        set_tun->tun_flags = ip_tun->key.tun_flags;

        if (tun_type == NFP_FL_TUNNEL_GENEVE) {
                set_tun->tun_proto = htons(ETH_P_TEB);
                set_tun->tun_len = ip_tun->options_len / 4;
        }

        /* Complete pre_tunnel action. */
        pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

        return 0;
}

static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
        u32 oldvalue = get_unaligned((u32 *)p_exact);
        u32 oldmask = get_unaligned((u32 *)p_mask);

        value &= mask;
        value |= oldvalue & ~mask;

        put_unaligned(oldmask | mask, (u32 *)p_mask);
        put_unaligned(value, (u32 *)p_exact);
}
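
/* Worked example (illustrative): two pedit keys touching the same 32-bit
 * word accumulate through nfp_fl_set_helper32(). First key: value
 * 0xaabbccdd, mask 0xffff0000 -> exact 0xaabb0000, mask 0xffff0000.
 * Second key: value 0x000011dd, mask 0x0000ffff -> exact becomes
 * 0xaabb11dd and mask becomes 0xffffffff.
 */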

static int
nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
               struct nfp_fl_set_eth *set_eth)
{
        u32 exact, mask;

        if (off + 4 > ETH_ALEN * 2)
                return -EOPNOTSUPP;

        mask = ~tcf_pedit_mask(action, idx);
        exact = tcf_pedit_val(action, idx);

        if (exact & ~mask)
                return -EOPNOTSUPP;

        nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
                            &set_eth->eth_addr_mask[off]);

        set_eth->reserved = cpu_to_be16(0);
        set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
        set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

        return 0;
}

struct ipv4_ttl_word {
        __u8    ttl;
        __u8    protocol;
        __sum16 check;
};

static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
               struct nfp_fl_set_ip4_addrs *set_ip_addr,
               struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
{
        struct ipv4_ttl_word *ttl_word_mask;
        struct ipv4_ttl_word *ttl_word;
        struct iphdr *tos_word_mask;
        struct iphdr *tos_word;
        __be32 exact, mask;

        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~tcf_pedit_mask(action, idx);
        exact = (__force __be32)tcf_pedit_val(action, idx);

        if (exact & ~mask)
                return -EOPNOTSUPP;

        switch (off) {
        case offsetof(struct iphdr, daddr):
                set_ip_addr->ipv4_dst_mask |= mask;
                set_ip_addr->ipv4_dst &= ~mask;
                set_ip_addr->ipv4_dst |= exact & mask;
                set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
                set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
                                           NFP_FL_LW_SIZ;
                break;
        case offsetof(struct iphdr, saddr):
                set_ip_addr->ipv4_src_mask |= mask;
                set_ip_addr->ipv4_src &= ~mask;
                set_ip_addr->ipv4_src |= exact & mask;
                set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
                set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
                                           NFP_FL_LW_SIZ;
                break;
        case offsetof(struct iphdr, ttl):
                ttl_word_mask = (struct ipv4_ttl_word *)&mask;
                ttl_word = (struct ipv4_ttl_word *)&exact;

                if (ttl_word_mask->protocol || ttl_word_mask->check)
                        return -EOPNOTSUPP;

                set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
                set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
                set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
                set_ip_ttl_tos->head.jump_id =
                        NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
                set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
                                              NFP_FL_LW_SIZ;
                break;
        case round_down(offsetof(struct iphdr, tos), 4):
                tos_word_mask = (struct iphdr *)&mask;
                tos_word = (struct iphdr *)&exact;

                if (tos_word_mask->version || tos_word_mask->ihl ||
                    tos_word_mask->tot_len)
                        return -EOPNOTSUPP;

                set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
                set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
                set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
                set_ip_ttl_tos->head.jump_id =
                        NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
                set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
                                              NFP_FL_LW_SIZ;
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}
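
/* Example (illustrative): "action pedit ex munge ip ttl set 64" arrives as
 * a 32-bit write at offsetof(struct iphdr, ttl) == 8, covering the
 * ttl/protocol/check fields mirrored by struct ipv4_ttl_word; only the ttl
 * byte of the mask may be set or the key is rejected. A tos write likewise
 * lands in the word at offset 0 and must not touch version/ihl/tot_len.
 */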

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
                      struct nfp_fl_set_ipv6_addr *ip6)
{
        ip6->ipv6[word].mask |= mask;
        ip6->ipv6[word].exact &= ~mask;
        ip6->ipv6[word].exact |= exact & mask;

        ip6->reserved = cpu_to_be16(0);
        ip6->head.jump_id = opcode_tag;
        ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
               struct nfp_fl_set_ipv6_addr *ip_dst,
               struct nfp_fl_set_ipv6_addr *ip_src)
{
        __be32 exact, mask;
        u8 word;

        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~tcf_pedit_mask(action, idx);
        exact = (__force __be32)tcf_pedit_val(action, idx);

        if (exact & ~mask)
                return -EOPNOTSUPP;

        if (off < offsetof(struct ipv6hdr, saddr)) {
                return -EOPNOTSUPP;
        } else if (off < offsetof(struct ipv6hdr, daddr)) {
                word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
                                      exact, mask, ip_src);
        } else if (off < offsetof(struct ipv6hdr, daddr) +
                       sizeof(struct in6_addr)) {
                word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
                                      exact, mask, ip_dst);
        } else {
                return -EOPNOTSUPP;
        }

        return 0;
}
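
/* Example (illustrative): a pedit write at offset 12 falls within saddr
 * (offsetof(struct ipv6hdr, saddr) == 8), giving
 * word = (12 - 8) / sizeof(exact) = 1, i.e. the second 32-bit word of the
 * source address.
 */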

static int
nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
                 struct nfp_fl_set_tport *set_tport, int opcode)
{
        u32 exact, mask;

        if (off)
                return -EOPNOTSUPP;

        mask = ~tcf_pedit_mask(action, idx);
        exact = tcf_pedit_val(action, idx);

        if (exact & ~mask)
                return -EOPNOTSUPP;

        nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
                            set_tport->tp_port_mask);

        set_tport->reserved = cpu_to_be16(0);
        set_tport->head.jump_id = opcode;
        set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

        return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
        switch (ip_proto) {
        case 0:
                /* The filter doesn't force a protocol match, so both TCP
                 * and UDP checksums will be updated if encountered.
                 */
                return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
        case IPPROTO_TCP:
                return TCA_CSUM_UPDATE_FLAG_TCP;
        case IPPROTO_UDP:
                return TCA_CSUM_UPDATE_FLAG_UDP;
        default:
                /* All other protocols will be ignored by FW */
                return 0;
        }
}

static int
nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
             char *nfp_action, int *a_len, u32 *csum_updated)
{
        struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
        struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
        struct nfp_fl_set_ip4_addrs set_ip_addr;
        struct nfp_fl_set_tport set_tport;
        struct nfp_fl_set_eth set_eth;
        enum pedit_header_type htype;
        int idx, nkeys, err;
        size_t act_size = 0;
        u32 offset, cmd;
        u8 ip_proto = 0;

        memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
        memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
        memset(&set_ip6_src, 0, sizeof(set_ip6_src));
        memset(&set_ip_addr, 0, sizeof(set_ip_addr));
        memset(&set_tport, 0, sizeof(set_tport));
        memset(&set_eth, 0, sizeof(set_eth));
        nkeys = tcf_pedit_nkeys(action);

        for (idx = 0; idx < nkeys; idx++) {
                cmd = tcf_pedit_cmd(action, idx);
                htype = tcf_pedit_htype(action, idx);
                offset = tcf_pedit_offset(action, idx);

                if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
                        return -EOPNOTSUPP;

                switch (htype) {
                case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
                        err = nfp_fl_set_eth(action, idx, offset, &set_eth);
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
                        err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr,
                                             &set_ip_ttl_tos);
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
                        err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
                                             &set_ip6_src);
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
                        err = nfp_fl_set_tport(action, idx, offset, &set_tport,
                                               NFP_FL_ACTION_OPCODE_SET_TCP);
                        break;
                case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
                        err = nfp_fl_set_tport(action, idx, offset, &set_tport,
                                               NFP_FL_ACTION_OPCODE_SET_UDP);
                        break;
                default:
                        return -EOPNOTSUPP;
                }
                if (err)
                        return err;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *basic;

                basic = skb_flow_dissector_target(flow->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  flow->key);
                ip_proto = basic->ip_proto;
        }

        if (set_eth.head.len_lw) {
                act_size = sizeof(set_eth);
                memcpy(nfp_action, &set_eth, act_size);
                *a_len += act_size;
        }
        if (set_ip_ttl_tos.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_ip_ttl_tos);
                memcpy(nfp_action, &set_ip_ttl_tos, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                 nfp_fl_csum_l4_to_flag(ip_proto);
        }
        if (set_ip_addr.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_ip_addr);
                memcpy(nfp_action, &set_ip_addr, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                 nfp_fl_csum_l4_to_flag(ip_proto);
        }
        if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
                /* TC compiles set src and dst IPv6 address as a single
                 * action, but the hardware requires these to be two
                 * separate actions.
                 */
                nfp_action += act_size;
                act_size = sizeof(set_ip6_src);
                memcpy(nfp_action, &set_ip6_src, act_size);
                *a_len += act_size;

                act_size = sizeof(set_ip6_dst);
                memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
                       act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_ip6_dst.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_ip6_dst);
                memcpy(nfp_action, &set_ip6_dst, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_ip6_src.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_ip6_src);
                memcpy(nfp_action, &set_ip6_src, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }
        if (set_tport.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_tport);
                memcpy(nfp_action, &set_tport, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }

        return 0;
}
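
/* Example (illustrative) of a filter exercising this path:
 *   tc filter add dev $REPR ingress protocol ip flower ip_proto tcp \
 *           action pedit ex munge ip dst set 10.0.0.2 pipe \
 *           action csum ip and tcp pipe \
 *           action mirred egress redirect dev $REPR2
 * The pedit sets bits in csum_updated, and the later csum action is then
 * accepted because the recalculations it requests are already covered by
 * hardware ($REPR/$REPR2 are placeholder representor netdevs).
 */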

static int
nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
                         struct nfp_fl_payload *nfp_fl, int *a_len,
                         struct net_device *netdev, bool last,
                         enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
                         int *out_cnt, u32 *csum_updated)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_output *output;
        int err, prelag_size;

        /* If csum_updated has not been reset by now, it means HW will
         * incorrectly update csums when they are not requested.
         */
        if (*csum_updated)
                return -EOPNOTSUPP;

        if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
                return -EOPNOTSUPP;

        output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
        err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
                            tun_out_cnt);
        if (err)
                return err;

        *a_len += sizeof(struct nfp_fl_output);

        if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
                /* nfp_fl_pre_lag returns -err or size of prelag action added.
                 * This will be 0 if it is not egressing to a lag dev.
                 */
                prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
                if (prelag_size < 0)
                        return prelag_size;
                else if (prelag_size > 0 && (!last || *out_cnt))
                        return -EOPNOTSUPP;

                *a_len += prelag_size;
        }
        (*out_cnt)++;

        return 0;
}

static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
                       struct tc_cls_flower_offload *flow,
                       struct nfp_fl_payload *nfp_fl, int *a_len,
                       struct net_device *netdev,
                       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
                       int *out_cnt, u32 *csum_updated)
{
        struct nfp_fl_set_ipv4_udp_tun *set_tun;
        struct nfp_fl_pre_tunnel *pre_tun;
        struct nfp_fl_push_vlan *psh_v;
        struct nfp_fl_pop_vlan *pop_v;
        int err;

        if (is_tcf_gact_shot(a)) {
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
        } else if (is_tcf_mirred_egress_redirect(a)) {
                err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
                                               true, tun_type, tun_out_cnt,
                                               out_cnt, csum_updated);
                if (err)
                        return err;

        } else if (is_tcf_mirred_egress_mirror(a)) {
                err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
                                               false, tun_type, tun_out_cnt,
                                               out_cnt, csum_updated);
                if (err)
                        return err;

        } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
                if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;

                pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

                nfp_fl_pop_vlan(pop_v);
                *a_len += sizeof(struct nfp_fl_pop_vlan);
        } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
                if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;

                psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                nfp_fl_push_vlan(psh_v, a);
                *a_len += sizeof(struct nfp_fl_push_vlan);
        } else if (is_tcf_tunnel_set(a)) {
                struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
                struct nfp_repr *repr = netdev_priv(netdev);

                *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
                if (*tun_type == NFP_FL_TUNNEL_NONE)
                        return -EOPNOTSUPP;

                if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
                        return -EOPNOTSUPP;

                /* Pre-tunnel action is required for tunnel encap.
                 * This checks for next hop entries on NFP.
                 * If none, the packet falls back before applying other actions.
                 */
                if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
                    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
                        return -EOPNOTSUPP;

                pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
                *a_len += sizeof(struct nfp_fl_pre_tunnel);

                err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
                if (err)
                        return err;

                set_tun = (void *)&nfp_fl->action_data[*a_len];
                err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
                                              *tun_type, netdev);
                if (err)
                        return err;
                *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
        } else if (is_tcf_tunnel_release(a)) {
                /* Tunnel decap is handled by default so accept action. */
                return 0;
        } else if (is_tcf_pedit(a)) {
                if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
                                 a_len, csum_updated))
                        return -EOPNOTSUPP;
        } else if (is_tcf_csum(a)) {
                /* csum action requests recalc of something we have not fixed */
                if (tcf_csum_update_flags(a) & ~*csum_updated)
                        return -EOPNOTSUPP;
                /* If we will correctly fix the csum we can remove it from the
                 * csum update list, which is later used to check support.
                 */
                *csum_updated &= ~tcf_csum_update_flags(a);
        } else {
                /* Currently we do not handle any other actions. */
                return -EOPNOTSUPP;
        }

        return 0;
}

int nfp_flower_compile_action(struct nfp_app *app,
                              struct tc_cls_flower_offload *flow,
                              struct net_device *netdev,
                              struct nfp_fl_payload *nfp_flow)
{
        int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
        enum nfp_flower_tun_type tun_type;
        const struct tc_action *a;
        u32 csum_updated = 0;

        memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
        nfp_flow->meta.act_len = 0;
        tun_type = NFP_FL_TUNNEL_NONE;
        act_len = 0;
        act_cnt = 0;
        tun_out_cnt = 0;
        out_cnt = 0;

        tcf_exts_for_each_action(i, a, flow->exts) {
                err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
                                             netdev, &tun_type, &tun_out_cnt,
                                             &out_cnt, &csum_updated);
                if (err)
                        return err;
                act_cnt++;
        }

        /* We optimise when the action list is small; this optimisation can
         * unfortunately not be applied once there is more than one action
         * in the list.
         */
        if (act_cnt > 1)
                nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

        nfp_flow->meta.act_len = act_len;

        return 0;
}
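
/* Usage note (illustrative summary, not in the original source):
 * nfp_flower_compile_action() is the entry point invoked while building a
 * flow payload. On success, nfp_flow->meta.act_len holds the total length
 * of the serialised action list, and meta.shortcut either names a
 * single-action fast path or NFP_FL_SC_ACT_NULL once more than one action
 * is present.
 */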