// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports tp_min;
	struct flow_dissector_key_ports tp_max;
	struct flow_dissector_key_ct ct;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};
struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};
struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};
static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
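/* A filter's lookup key is simply (packet key & mask), computed long-by-long
 * over the mask's trimmed byte range; fl_classify() feeds the result of
 * fl_set_masked_key() straight into the per-mask hashtable lookup below.
 */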
static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
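/* Port ranges (tp_min/tp_max) cannot be expressed as a plain bitmask, so the
 * helpers below compare the dissected ports against the filter's min/max
 * explicitly and, on success, copy the filter's own masked boundary values
 * into the lookup key so the subsequent hash lookup still matches.
 */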
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.dst);
	max_mask = htons(filter->mask->key.tp_max.dst);
	min_val = htons(filter->key.tp_min.dst);
	max_val = htons(filter->key.tp_max.dst);

	if (min_mask && max_mask) {
		if (htons(key->tp.dst) < min_val ||
		    htons(key->tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.dst = filter->mkey.tp_min.dst;
		mkey->tp_max.dst = filter->mkey.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.src);
	max_mask = htons(filter->mask->key.tp_max.src);
	min_val = htons(filter->key.tp_min.src);
	max_val = htons(filter->key.tp_max.src);

	if (min_mask && max_mask) {
		if (htons(key->tp.src) < min_val ||
		    htons(key->tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.src = filter->mkey.tp_min.src;
		mkey->tp_max.src = filter->mkey.tp_max.src;
	}
	return true;
}
static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
				       struct fl_flow_key *mkey,
				       struct fl_flow_key *key)
{
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, mkey, key);

	return __fl_lookup(mask, mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};
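/* Classification walks the masks in list order: for each mask only the keys
 * that mask actually uses are dissected from the skb, the result is masked
 * with fl_set_masked_key() and looked up in that mask's hashtable. The first
 * match that is not skip_sw terminates the search and runs its actions.
 */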
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct fl_flow_key skb_mkey;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it rather here.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map));
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}
static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}
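/* Identical masks are shared: head->ht maps mask contents to an existing
 * fl_flow_mask, so all filters using the same mask also share one hashtable
 * and one flow_dissector. Masks are refcounted and freed via rcu work below.
 */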
static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}
static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	if (!rtnl_held)
		rtnl_lock();

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
	spin_lock(&tp->lock);
	list_del_init(&f->hw_list);
	tcf_block_offload_dec(block, &f->flags);
	spin_unlock(&tp->lock);

	if (!rtnl_held)
		rtnl_unlock();
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	if (!rtnl_held)
		rtnl_lock();

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule) {
		err = -ENOMEM;
		goto errout;
	}

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw)
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
		else
			err = 0;
		goto errout;
	}

	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
	kfree(cls_flower.rule);

	if (err < 0) {
		fl_hw_destroy_filter(tp, f, true, NULL);
		goto errout;
	} else if (err > 0) {
		f->in_hw_count = err;
		err = 0;
		spin_lock(&tp->lock);
		tcf_block_offload_inc(block, &f->flags);
		spin_unlock(&tp->lock);
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) {
		err = -EINVAL;
		goto errout;
	}

	spin_lock(&tp->lock);
	list_add(&f->hw_list, &head->hw_filters);
	spin_unlock(&tp->lock);
errout:
	if (!rtnl_held)
		rtnl_unlock();

	return err;
}
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	if (!rtnl_held)
		rtnl_lock();

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.lastused);

	if (!rtnl_held)
		rtnl_unlock();
}
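/* Filters are reference counted because flower can be updated without rtnl:
 * __fl_get() looks a handle up under RCU and only returns a filter whose
 * refcount could still be raised, while __fl_put() defers the actual free to
 * a workqueue once the last reference is dropped.
 */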
static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}
622 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
623 [TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
624 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
625 [TCA_FLOWER_INDEV] = { .type = NLA_STRING,
627 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
628 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
629 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
630 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
631 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
632 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
633 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
634 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
635 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
636 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
637 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
638 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
639 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
640 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
641 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
642 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
643 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
644 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
645 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
646 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
647 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
648 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
649 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
650 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
651 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
652 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
653 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
654 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
655 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
656 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
657 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
658 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
659 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
660 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
661 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
662 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
663 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
664 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
665 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
666 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
667 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
668 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
669 [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
670 [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
671 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
672 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
673 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
674 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
675 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
676 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
677 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
678 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
679 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
680 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
681 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
682 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
683 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
684 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
685 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
686 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
687 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
688 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
689 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
690 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
691 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
692 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
693 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
694 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
695 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
696 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
697 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
698 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
699 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
700 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
701 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
702 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
703 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
704 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
705 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
706 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
707 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
708 [TCA_FLOWER_KEY_CT_STATE] = { .type = NLA_U16 },
709 [TCA_FLOWER_KEY_CT_STATE_MASK] = { .type = NLA_U16 },
710 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 },
711 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 },
712 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 },
713 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 },
714 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY,
715 .len = 128 / BITS_PER_BYTE },
716 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
717 .len = 128 / BITS_PER_BYTE },
static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
						       .len = 128 },
};
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
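/* Convention used by all fl_set_key_val() callers below: an attribute without
 * a netlink mask counterpart (or with mask_type == TCA_FLOWER_UNSPEC) is
 * treated as an exact match, i.e. the mask is filled with all-ones.
 */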
static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask)
{
	fl_set_key_val(tb, &key->tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
	fl_set_key_val(tb, &key->tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
	fl_set_key_val(tb, &key->tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
	fl_set_key_val(tb, &key->tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));

	if ((mask->tp_min.dst && mask->tp_max.dst &&
	     htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
	     (mask->tp_min.src && mask->tp_max.src &&
	      htons(key->tp_max.src) <= htons(key->tp_min.src)))
		return -EINVAL;

	return 0;
}
static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}
static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}
static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}
877 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
878 int depth, int option_len,
879 struct netlink_ext_ack *extack)
881 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
882 struct nlattr *class = NULL, *type = NULL, *data = NULL;
883 struct geneve_opt *opt;
884 int err, data_len = 0;
886 if (option_len > sizeof(struct geneve_opt))
887 data_len = option_len - sizeof(struct geneve_opt);
889 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
890 memset(opt, 0xff, option_len);
891 opt->length = data_len / 4;
	/* If no mask has been provided we assume an exact match. */
898 return sizeof(struct geneve_opt) + data_len;
900 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
901 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
905 err = nla_parse_nested_deprecated(tb,
906 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
907 nla, geneve_opt_policy, extack);
911 /* We are not allowed to omit any of CLASS, TYPE or DATA
912 * fields from the key.
915 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
916 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
917 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
918 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
922 /* Omitting any of CLASS, TYPE or DATA fields is allowed
925 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
926 int new_len = key->enc_opts.len;
928 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
929 data_len = nla_len(data);
931 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
935 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
939 new_len += sizeof(struct geneve_opt) + data_len;
940 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
941 if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
942 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
945 opt->length = data_len / 4;
946 memcpy(opt->opt_data, nla_data(data), data_len);
949 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
950 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
951 opt->opt_class = nla_get_be16(class);
954 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
955 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
956 opt->type = nla_get_u8(type);
959 return sizeof(struct geneve_opt) + data_len;
962 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
963 struct fl_flow_key *mask,
964 struct netlink_ext_ack *extack)
966 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
967 int err, option_len, key_depth, msk_depth = 0;
969 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
970 TCA_FLOWER_KEY_ENC_OPTS_MAX,
971 enc_opts_policy, extack);
975 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
977 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
978 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
979 TCA_FLOWER_KEY_ENC_OPTS_MAX,
980 enc_opts_policy, extack);
984 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
985 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
988 nla_for_each_attr(nla_opt_key, nla_enc_key,
989 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
990 switch (nla_type(nla_opt_key)) {
991 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
993 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
994 option_len = fl_set_geneve_opt(nla_opt_key, key,
995 key_depth, option_len,
1000 key->enc_opts.len += option_len;
1001 /* At the same time we need to parse through the mask
1002 * in order to verify exact and mask attribute lengths.
1004 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1005 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1006 msk_depth, option_len,
1011 mask->enc_opts.len += option_len;
1012 if (key->enc_opts.len != mask->enc_opts.len) {
1013 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1018 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1021 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1029 static int fl_set_key_ct(struct nlattr **tb,
1030 struct flow_dissector_key_ct *key,
1031 struct flow_dissector_key_ct *mask,
1032 struct netlink_ext_ack *extack)
1034 if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1035 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1036 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1039 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1040 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1041 sizeof(key->ct_state));
1043 if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1044 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1045 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1048 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1049 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1050 sizeof(key->ct_zone));
1052 if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1053 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1054 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1057 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1058 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1059 sizeof(key->ct_mark));
1061 if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1062 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1063 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1066 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1067 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1068 sizeof(key->ct_labels));
1074 static int fl_set_key(struct net *net, struct nlattr **tb,
1075 struct fl_flow_key *key, struct fl_flow_key *mask,
1076 struct netlink_ext_ack *extack)
1081 if (tb[TCA_FLOWER_INDEV]) {
1082 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1085 key->meta.ingress_ifindex = err;
1086 mask->meta.ingress_ifindex = 0xffffffff;
1089 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1090 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1091 sizeof(key->eth.dst));
1092 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1093 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1094 sizeof(key->eth.src));
1096 if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1097 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1099 if (eth_type_vlan(ethertype)) {
1100 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1101 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1104 if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1105 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1106 if (eth_type_vlan(ethertype)) {
1107 fl_set_key_vlan(tb, ethertype,
1108 TCA_FLOWER_KEY_CVLAN_ID,
1109 TCA_FLOWER_KEY_CVLAN_PRIO,
1110 &key->cvlan, &mask->cvlan);
1111 fl_set_key_val(tb, &key->basic.n_proto,
1112 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1113 &mask->basic.n_proto,
1115 sizeof(key->basic.n_proto));
1117 key->basic.n_proto = ethertype;
1118 mask->basic.n_proto = cpu_to_be16(~0);
1122 key->basic.n_proto = ethertype;
1123 mask->basic.n_proto = cpu_to_be16(~0);
1127 if (key->basic.n_proto == htons(ETH_P_IP) ||
1128 key->basic.n_proto == htons(ETH_P_IPV6)) {
1129 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1130 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1131 sizeof(key->basic.ip_proto));
1132 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1135 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1136 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1137 mask->control.addr_type = ~0;
1138 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1139 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1140 sizeof(key->ipv4.src));
1141 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1142 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1143 sizeof(key->ipv4.dst));
1144 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1145 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1146 mask->control.addr_type = ~0;
1147 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1148 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1149 sizeof(key->ipv6.src));
1150 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1151 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1152 sizeof(key->ipv6.dst));
1155 if (key->basic.ip_proto == IPPROTO_TCP) {
1156 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1157 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1158 sizeof(key->tp.src));
1159 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1160 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1161 sizeof(key->tp.dst));
1162 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1163 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1164 sizeof(key->tcp.flags));
1165 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1166 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1167 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1168 sizeof(key->tp.src));
1169 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1170 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1171 sizeof(key->tp.dst));
1172 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1173 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1174 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1175 sizeof(key->tp.src));
1176 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1177 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1178 sizeof(key->tp.dst));
1179 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1180 key->basic.ip_proto == IPPROTO_ICMP) {
1181 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1183 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1184 sizeof(key->icmp.type));
1185 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1187 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1188 sizeof(key->icmp.code));
1189 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1190 key->basic.ip_proto == IPPROTO_ICMPV6) {
1191 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1193 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1194 sizeof(key->icmp.type));
1195 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1197 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1198 sizeof(key->icmp.code));
1199 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1200 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1201 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
1204 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1205 key->basic.n_proto == htons(ETH_P_RARP)) {
1206 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1207 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1208 sizeof(key->arp.sip));
1209 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1210 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1211 sizeof(key->arp.tip));
1212 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1213 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1214 sizeof(key->arp.op));
1215 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1216 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1217 sizeof(key->arp.sha));
1218 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1219 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1220 sizeof(key->arp.tha));
1223 if (key->basic.ip_proto == IPPROTO_TCP ||
1224 key->basic.ip_proto == IPPROTO_UDP ||
1225 key->basic.ip_proto == IPPROTO_SCTP) {
1226 ret = fl_set_key_port_range(tb, key, mask);
1231 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1232 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1233 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1234 mask->enc_control.addr_type = ~0;
1235 fl_set_key_val(tb, &key->enc_ipv4.src,
1236 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1237 &mask->enc_ipv4.src,
1238 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1239 sizeof(key->enc_ipv4.src));
1240 fl_set_key_val(tb, &key->enc_ipv4.dst,
1241 TCA_FLOWER_KEY_ENC_IPV4_DST,
1242 &mask->enc_ipv4.dst,
1243 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1244 sizeof(key->enc_ipv4.dst));
1247 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1248 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1249 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1250 mask->enc_control.addr_type = ~0;
1251 fl_set_key_val(tb, &key->enc_ipv6.src,
1252 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1253 &mask->enc_ipv6.src,
1254 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1255 sizeof(key->enc_ipv6.src));
1256 fl_set_key_val(tb, &key->enc_ipv6.dst,
1257 TCA_FLOWER_KEY_ENC_IPV6_DST,
1258 &mask->enc_ipv6.dst,
1259 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1260 sizeof(key->enc_ipv6.dst));
1263 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1264 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1265 sizeof(key->enc_key_id.keyid));
1267 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1268 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1269 sizeof(key->enc_tp.src));
1271 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1272 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1273 sizeof(key->enc_tp.dst));
1275 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1277 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1278 ret = fl_set_enc_opt(tb, key, mask, extack);
1283 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1287 if (tb[TCA_FLOWER_KEY_FLAGS])
1288 ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
1293 static void fl_mask_copy(struct fl_flow_mask *dst,
1294 struct fl_flow_mask *src)
1296 const void *psrc = fl_key_get_start(&src->key, src);
1297 void *pdst = fl_key_get_start(&dst->key, src);
1299 memcpy(pdst, psrc, fl_mask_range(src));
1300 dst->range = src->range;
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
{
	mask->filter_ht_params = fl_ht_params;
	mask->filter_ht_params.key_len = fl_mask_range(mask);
	mask->filter_ht_params.key_offset += mask->range.start;

	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
}
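/* Each mask owns a filter hashtable keyed only on the bytes in
 * [range.start, range.end) of the masked key, so hashing and comparison skip
 * the parts of struct fl_flow_key that this mask never inspects.
 */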
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)

#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))				\

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);
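/* fl_init_dissector() uses the macros above to build the smallest possible
 * flow_dissector key list for a mask: a dissector key is only registered when
 * the corresponding member of the mask is non-zero, so packet dissection does
 * no more work than the installed filters require.
 */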
1338 static void fl_init_dissector(struct flow_dissector *dissector,
1339 struct fl_flow_key *mask)
1341 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1344 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1345 FLOW_DISSECTOR_KEY_META, meta);
1346 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1347 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1348 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1349 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1350 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1351 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1352 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1353 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1354 if (FL_KEY_IS_MASKED(mask, tp) ||
1355 FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
1356 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
1357 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1358 FLOW_DISSECTOR_KEY_IP, ip);
1359 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1360 FLOW_DISSECTOR_KEY_TCP, tcp);
1361 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1362 FLOW_DISSECTOR_KEY_ICMP, icmp);
1363 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1364 FLOW_DISSECTOR_KEY_ARP, arp);
1365 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1366 FLOW_DISSECTOR_KEY_MPLS, mpls);
1367 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1368 FLOW_DISSECTOR_KEY_VLAN, vlan);
1369 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1370 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1371 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1372 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1373 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1374 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1375 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1376 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1377 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1378 FL_KEY_IS_MASKED(mask, enc_ipv6))
1379 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1381 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1382 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1383 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1384 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1385 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1386 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1387 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1388 FLOW_DISSECTOR_KEY_CT, ct);
1390 skb_flow_dissector_init(dissector, keys, cnt);
1393 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1394 struct fl_flow_mask *mask)
1396 struct fl_flow_mask *newmask;
1399 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1401 return ERR_PTR(-ENOMEM);
1403 fl_mask_copy(newmask, mask);
1405 if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
1406 (newmask->key.tp_min.src && newmask->key.tp_max.src))
1407 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1409 err = fl_init_mask_hashtable(newmask);
1413 fl_init_dissector(&newmask->dissector, &newmask->key);
1415 INIT_LIST_HEAD_RCU(&newmask->filters);
1417 refcount_set(&newmask->refcnt, 1);
1418 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1419 &newmask->ht_node, mask_ht_params);
1421 goto errout_destroy;
1423 spin_lock(&head->masks_lock);
1424 list_add_tail_rcu(&newmask->list, &head->masks);
1425 spin_unlock(&head->masks_lock);
1430 rhashtable_destroy(&newmask->ht);
1434 return ERR_PTR(err);
1437 static int fl_check_assign_mask(struct cls_fl_head *head,
1438 struct cls_fl_filter *fnew,
1439 struct cls_fl_filter *fold,
1440 struct fl_flow_mask *mask)
1442 struct fl_flow_mask *newmask;
1447 /* Insert mask as temporary node to prevent concurrent creation of mask
1448 * with same key. Any concurrent lookups with same key will return
1449 * -EAGAIN because mask's refcnt is zero.
1451 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1459 goto errout_cleanup;
1462 newmask = fl_create_new_mask(head, mask);
1463 if (IS_ERR(newmask)) {
1464 ret = PTR_ERR(newmask);
1465 goto errout_cleanup;
1468 fnew->mask = newmask;
1470 } else if (IS_ERR(fnew->mask)) {
1471 ret = PTR_ERR(fnew->mask);
1472 } else if (fold && fold->mask != fnew->mask) {
1474 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1475 /* Mask was deleted concurrently, try again */
1482 rhashtable_remove_fast(&head->ht, &mask->ht_node,
1487 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1488 struct cls_fl_filter *f, struct fl_flow_mask *mask,
1489 unsigned long base, struct nlattr **tb,
1490 struct nlattr *est, bool ovr,
1491 struct fl_flow_tmplt *tmplt, bool rtnl_held,
1492 struct netlink_ext_ack *extack)
1496 err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1501 if (tb[TCA_FLOWER_CLASSID]) {
1502 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1505 tcf_bind_filter(tp, &f->res, base);
1510 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1514 fl_mask_update_range(mask);
1515 fl_set_masked_key(&f->mkey, &f->key, mask);
1517 if (!fl_mask_fits_tmplt(tmplt, mask)) {
1518 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
			       struct cls_fl_filter *fold,
			       bool *in_ht)
{
	struct fl_flow_mask *mask = fnew->mask;
	int err;

	err = rhashtable_lookup_insert_fast(&mask->ht,
					    &fnew->ht_node,
					    mask->filter_ht_params);
	if (err) {
		*in_ht = false;
		/* It is okay if filter with same key exists when
		 * overwriting.
		 */
		return fold && err == -EEXIST ? 0 : err;
	}

	*in_ht = true;
	return 0;
}
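/* fl_change() may run without rtnl when the classifier is used unlocked. The
 * update path therefore relies on tp->lock and the filter/mask refcounts;
 * races with concurrent inserts or deletes surface as -EAGAIN, which makes
 * cls_api retry the operation.
 */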
1547 static int fl_change(struct net *net, struct sk_buff *in_skb,
1548 struct tcf_proto *tp, unsigned long base,
1549 u32 handle, struct nlattr **tca,
1550 void **arg, bool ovr, bool rtnl_held,
1551 struct netlink_ext_ack *extack)
1553 struct cls_fl_head *head = fl_head_dereference(tp);
1554 struct cls_fl_filter *fold = *arg;
1555 struct cls_fl_filter *fnew;
1556 struct fl_flow_mask *mask;
1561 if (!tca[TCA_OPTIONS]) {
1566 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1572 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1575 goto errout_mask_alloc;
1578 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1579 tca[TCA_OPTIONS], fl_policy, NULL);
1583 if (fold && handle && fold->handle != handle) {
1588 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1593 INIT_LIST_HEAD(&fnew->hw_list);
1594 refcount_set(&fnew->refcnt, 1);
1596 err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1600 if (tb[TCA_FLOWER_FLAGS]) {
1601 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1603 if (!tc_flags_valid(fnew->flags)) {
1609 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1610 tp->chain->tmplt_priv, rtnl_held, extack);
1614 err = fl_check_assign_mask(head, fnew, fold, mask);
1618 err = fl_ht_insert_unique(fnew, fold, &in_ht);
1622 if (!tc_skip_hw(fnew->flags)) {
1623 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1628 if (!tc_in_hw(fnew->flags))
1629 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1631 spin_lock(&tp->lock);
1633 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
1634 * proto again or create new one, if necessary.
1642 /* Fold filter was deleted concurrently. Retry lookup. */
1643 if (fold->deleted) {
1648 fnew->handle = handle;
1651 struct rhashtable_params params =
1652 fnew->mask->filter_ht_params;
1654 err = rhashtable_insert_fast(&fnew->mask->ht,
1662 refcount_inc(&fnew->refcnt);
1663 rhashtable_remove_fast(&fold->mask->ht,
1665 fold->mask->filter_ht_params);
1666 idr_replace(&head->handle_idr, fnew, fnew->handle);
1667 list_replace_rcu(&fold->list, &fnew->list);
1668 fold->deleted = true;
1670 spin_unlock(&tp->lock);
1672 fl_mask_put(head, fold->mask);
1673 if (!tc_skip_hw(fold->flags))
1674 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
1675 tcf_unbind_filter(tp, &fold->res);
1676 /* Caller holds reference to fold, so refcnt is always > 0
1679 refcount_dec(&fold->refcnt);
1683 /* user specifies a handle and it doesn't exist */
1684 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1685 handle, GFP_ATOMIC);
1687 /* Filter with specified handle was concurrently
1688 * inserted after initial check in cls_api. This is not
1689 * necessarily an error if NLM_F_EXCL is not set in
1690 * message flags. Returning EAGAIN will cause cls_api to
1691 * try to update concurrently inserted rule.
1697 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1698 INT_MAX, GFP_ATOMIC);
1703 refcount_inc(&fnew->refcnt);
1704 fnew->handle = handle;
1705 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
1706 spin_unlock(&tp->lock);
1712 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1716 spin_lock(&tp->lock);
1718 fnew->deleted = true;
1719 spin_unlock(&tp->lock);
1720 if (!tc_skip_hw(fnew->flags))
1721 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
1723 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1724 fnew->mask->filter_ht_params);
1726 fl_mask_put(head, fnew->mask);
1732 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1739 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
1740 bool rtnl_held, struct netlink_ext_ack *extack)
1742 struct cls_fl_head *head = fl_head_dereference(tp);
1743 struct cls_fl_filter *f = arg;
1747 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
1748 *last = list_empty(&head->masks);
1754 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1757 struct cls_fl_head *head = fl_head_dereference(tp);
1758 unsigned long id = arg->cookie, tmp;
1759 struct cls_fl_filter *f;
1761 arg->count = arg->skip;
1763 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
1764 /* don't return filters that are being deleted */
1765 if (!refcount_inc_not_zero(&f->refcnt))
1767 if (arg->fn(tp, f, arg) < 0) {
1778 static struct cls_fl_filter *
1779 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1781 struct cls_fl_head *head = fl_head_dereference(tp);
1783 spin_lock(&tp->lock);
1784 if (list_empty(&head->hw_filters)) {
1785 spin_unlock(&tp->lock);
1790 f = list_entry(&head->hw_filters, struct cls_fl_filter,
1792 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
1793 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
1794 spin_unlock(&tp->lock);
1799 spin_unlock(&tp->lock);
1803 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1804 void *cb_priv, struct netlink_ext_ack *extack)
1806 struct tcf_block *block = tp->chain->block;
1807 struct flow_cls_offload cls_flower = {};
1808 struct cls_fl_filter *f = NULL;
1811 /* hw_filters list can only be changed by hw offload functions after
1812 * obtaining rtnl lock. Make sure it is not changed while reoffload is
1817 while ((f = fl_get_next_hw_filter(tp, f, add))) {
1819 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1820 if (!cls_flower.rule) {
1825 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
1827 cls_flower.command = add ?
1828 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
1829 cls_flower.cookie = (unsigned long)f;
1830 cls_flower.rule->match.dissector = &f->mask->dissector;
1831 cls_flower.rule->match.mask = &f->mask->key;
1832 cls_flower.rule->match.key = &f->mkey;
1834 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
1836 kfree(cls_flower.rule);
1837 if (tc_skip_sw(f->flags)) {
1838 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
1845 cls_flower.classid = f->res.classid;
1847 err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
1848 kfree(cls_flower.rule);
1851 if (add && tc_skip_sw(f->flags)) {
1858 spin_lock(&tp->lock);
1859 tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
1861 spin_unlock(&tp->lock);
1869 static int fl_hw_create_tmplt(struct tcf_chain *chain,
1870 struct fl_flow_tmplt *tmplt)
1872 struct flow_cls_offload cls_flower = {};
1873 struct tcf_block *block = chain->block;
1875 cls_flower.rule = flow_rule_alloc(0);
1876 if (!cls_flower.rule)
1879 cls_flower.common.chain_index = chain->index;
1880 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
1881 cls_flower.cookie = (unsigned long) tmplt;
1882 cls_flower.rule->match.dissector = &tmplt->dissector;
1883 cls_flower.rule->match.mask = &tmplt->mask;
1884 cls_flower.rule->match.key = &tmplt->dummy_key;
1886 /* We don't care if driver (any of them) fails to handle this
1887 * call. It serves just as a hint for it.
1889 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
1890 kfree(cls_flower.rule);
1895 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
1896 struct fl_flow_tmplt *tmplt)
1898 struct flow_cls_offload cls_flower = {};
1899 struct tcf_block *block = chain->block;
1901 cls_flower.common.chain_index = chain->index;
1902 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
1903 cls_flower.cookie = (unsigned long) tmplt;
1905 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
1908 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
1909 struct nlattr **tca,
1910 struct netlink_ext_ack *extack)
1912 struct fl_flow_tmplt *tmplt;
1916 if (!tca[TCA_OPTIONS])
1917 return ERR_PTR(-EINVAL);
1919 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1921 return ERR_PTR(-ENOBUFS);
1922 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1923 tca[TCA_OPTIONS], fl_policy, NULL);
1927 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
1932 tmplt->chain = chain;
1933 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
1937 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
1939 err = fl_hw_create_tmplt(chain, tmplt);
1950 return ERR_PTR(err);
1953 static void fl_tmplt_destroy(void *tmplt_priv)
1955 struct fl_flow_tmplt *tmplt = tmplt_priv;
1957 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
1961 static int fl_dump_key_val(struct sk_buff *skb,
1962 void *val, int val_type,
1963 void *mask, int mask_type, int len)
1967 if (!memchr_inv(mask, 0, len))
1969 err = nla_put(skb, val_type, len, val);
1972 if (mask_type != TCA_FLOWER_UNSPEC) {
1973 err = nla_put(skb, mask_type, len, mask);
1980 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
1981 struct fl_flow_key *mask)
1983 if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
1984 &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
1985 sizeof(key->tp_min.dst)) ||
1986 fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
1987 &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
1988 sizeof(key->tp_max.dst)) ||
1989 fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
1990 &mask->tp_min.src, TCA_FLOWER_UNSPEC,
1991 sizeof(key->tp_min.src)) ||
1992 fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
1993 &mask->tp_max.src, TCA_FLOWER_UNSPEC,
1994 sizeof(key->tp_max.src)))
2000 static int fl_dump_key_mpls(struct sk_buff *skb,
2001 struct flow_dissector_key_mpls *mpls_key,
2002 struct flow_dissector_key_mpls *mpls_mask)
2006 if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
2008 if (mpls_mask->mpls_ttl) {
2009 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2010 mpls_key->mpls_ttl);
2014 if (mpls_mask->mpls_tc) {
2015 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2020 if (mpls_mask->mpls_label) {
2021 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2022 mpls_key->mpls_label);
2026 if (mpls_mask->mpls_bos) {
2027 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2028 mpls_key->mpls_bos);
2035 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2036 struct flow_dissector_key_ip *key,
2037 struct flow_dissector_key_ip *mask)
2039 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2040 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2041 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2042 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2044 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2045 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2051 static int fl_dump_key_vlan(struct sk_buff *skb,
2052 int vlan_id_key, int vlan_prio_key,
2053 struct flow_dissector_key_vlan *vlan_key,
2054 struct flow_dissector_key_vlan *vlan_mask)
2058 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2060 if (vlan_mask->vlan_id) {
2061 err = nla_put_u16(skb, vlan_id_key,
2066 if (vlan_mask->vlan_priority) {
2067 err = nla_put_u8(skb, vlan_prio_key,
2068 vlan_key->vlan_priority);
2075 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2076 u32 *flower_key, u32 *flower_mask,
2077 u32 flower_flag_bit, u32 dissector_flag_bit)
2079 if (dissector_mask & dissector_flag_bit) {
2080 *flower_mask |= flower_flag_bit;
2081 if (dissector_key & dissector_flag_bit)
2082 *flower_key |= flower_flag_bit;
2086 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2092 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2098 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2099 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2100 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2101 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2102 FLOW_DIS_FIRST_FRAG);
2104 _key = cpu_to_be32(key);
2105 _mask = cpu_to_be32(mask);
2107 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2111 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2114 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2115 struct flow_dissector_key_enc_opts *enc_opts)
2117 struct geneve_opt *opt;
2118 struct nlattr *nest;
2121 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2123 goto nla_put_failure;
2125 while (enc_opts->len > opt_off) {
2126 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2128 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2130 goto nla_put_failure;
2131 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2133 goto nla_put_failure;
2134 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2135 opt->length * 4, opt->opt_data))
2136 goto nla_put_failure;
2138 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2140 nla_nest_end(skb, nest);
2144 nla_nest_cancel(skb, nest);
static int fl_dump_key_ct(struct sk_buff *skb,
			  struct flow_dissector_key_ct *key,
			  struct flow_dissector_key_ct *mask)
{
	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			    sizeof(key->ct_state)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			    sizeof(key->ct_zone)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			    sizeof(key->ct_mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			    sizeof(key->ct_labels)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
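/* In this version only geneve tunnel options are dumped; any other
 * dst_opt_type is treated as an error and the nest is cancelled.
 */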
static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct nlattr *nest;
	int err;

	if (!enc_opts->len)
		return 0;
	nest = nla_nest_start_noflag(skb, enc_opt_type);
	if (!nest)
		goto nla_put_failure;

	switch (enc_opts->dst_opt_type) {
	case TUNNEL_GENEVE_OPT:
		err = fl_dump_key_geneve_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	default:
		goto nla_put_failure;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int fl_dump_key_enc_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *key_opts,
			       struct flow_dissector_key_enc_opts *msk_opts)
{
	int err;

	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
	if (err)
		return err;
	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
}
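/* Dump every key/mask pair of a filter (or template). fl_dump_key_val() and
 * the helpers above skip fields whose mask is all zeroes, so the dump only
 * reflects what is actually being matched on.
 */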
static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->meta.ingress_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			     &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			     sizeof(key->basic.ip_proto)) ||
	     fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	    fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
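/* Netlink dump entry point for a single filter. Flower runs unlocked
 * (TCF_PROTO_OPS_DOIT_UNLOCKED), so tp->lock is held around the fields a
 * concurrent change could update (classid, key/mask, flags); hardware stats
 * are refreshed before the actions are dumped. From userspace this path is
 * exercised by, for example (illustrative commands, not part of this file):
 *
 *   tc filter add dev eth0 ingress protocol ip flower ip_proto tcp dst_port 80 action drop
 *   tc -s filter show dev eth0 ingress
 */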
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
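/* Dump a chain template: reuses fl_dump_key() with the template's dummy key
 * and mask in place of a real filter's key.
 */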
static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");