// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}
static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}
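
/* Note on the request_module() above: the "cls_%s" alias means a request
 * for an unknown kind such as "flower" loads the cls_flower module; the
 * -EAGAIN return then forces the caller to replay the whole netlink
 * request now that the classifier is available.
 */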
/* Register (and unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
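
/* Typical registration from a classifier module (a sketch only; "foo" is
 * illustrative and a real classifier fills in all mandatory callbacks):
 *
 *	static struct tcf_proto_ops cls_foo_ops = {
 *		.kind	= "foo",
 *		.owner	= THIS_MODULE,
 *		...
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 */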
static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
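
/* tcf_queue_work() is how classifiers defer teardown: queue_rcu_work()
 * runs the work item only after an RCU grace period, so readers still
 * traversing the filter chain are guaranteed to be done before the
 * backing memory is freed in process context.
 */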
/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
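
/* Kernel-managed priorities grow downwards from major 0xC000: each
 * auto-allocated tcf_proto is placed just in front of the current head
 * (tp->prio - 1), and since classification walks the chain from the
 * head, the newest auto-prio filter is consulted first.
 */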
static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}
static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, extack);
}
static int walker_check_empty(struct tcf_proto *tp, void *fh,
			      struct tcf_walker *arg)
{
	if (fh) {
		arg->nonempty = true;
		return -1;
	}
	return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
	struct tcf_walker walker = { .fn = walker_check_empty, };

	tp->ops->walk(tp, &walker, rtnl_held);
	return !walker.nonempty;
}
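
/* The walker callback above returns nonzero on the first filter it
 * sees, so ->walk() aborts immediately and emptiness is decided without
 * enumerating the whole classifier.
 */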
static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
	spin_lock(&tp->lock);
	if (tcf_proto_is_empty(tp, rtnl_held))
		tp->deleting = true;
	spin_unlock(&tp->lock);
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}
#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}
static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}
/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}
static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}
static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
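
/* Chains referenced only through actions (tcf_chain_get_by_act) stay
 * alive as placeholders but are hidden from netlink dumps until a
 * filter takes the first non-action reference; compare
 * tcf_chain_held_by_acts_only() above.
 */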
static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}
static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_ing_cmd(struct net_device *dev,
				  struct tcf_block *block,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_priv,
				  enum flow_block_command command)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}
static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
	const struct Qdisc_class_ops *cops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	cops = qdisc->ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}

static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
					  flow_indr_block_bind_cb_t *cb,
					  void *cb_priv,
					  enum flow_block_command command)
{
	struct tcf_block *block = tc_dev_ingress_block(dev);

	tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
}
static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command);
	tcf_block_setup(block, &bo);
}
static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return tcf_block_setup(block, &bo);
}
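
/* Hardware binding is a two-step handshake: ndo_setup_tc(TC_SETUP_BLOCK)
 * lets the driver queue flow_block_cb entries on bo.cb_list, and
 * tcf_block_setup() then commits those callbacks and replays already
 * installed filters to them (or unwinds on failure).
 */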
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse the bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}
static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}
static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}
static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}
struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;
static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}
static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}
static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}
/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
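
/* Iteration idiom (sketch):
 *
 *	for (chain = tcf_get_next_chain(block, NULL); chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...;
 *
 * The helper releases the chain passed in and returns the next one
 * already held, so a plain for-loop neither leaks nor double-puts
 * references; a caller that stops early must drop the reference on the
 * chain it still holds.
 */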
static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}
/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}
/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}
static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}
static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains its reference by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}
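
/* A block therefore dies in two stages: dropping the last reference
 * unpublishes it (removing the shared-block idr entry and flushing the
 * chains), while the final kfree_rcu() in tcf_block_destroy() happens
 * either directly, when chain_list is already empty, or when the last
 * chain detaches in tcf_chain_detach().
 */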
/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}
static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}
struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);
static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);
static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
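
/* Typical use from a classful qdisc's ->init() (a sketch; foo_sched_data
 * and its fields are illustrative only):
 *
 *	struct foo_sched_data *q = qdisc_priv(sch);
 *	int err;
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *
 * The matching tcf_block_put() belongs in the qdisc's ->destroy().
 */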
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);
static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}
static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}
static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = tc_skb_protocol(skb);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			{
				struct tc_skb_ext *ext;

				ext = skb_ext_add(skb, TC_SKB_EXT);
				if (WARN_ON_ONCE(!ext))
					return TC_ACT_SHOT;

				ext->chain = err & TC_ACT_EXT_VAL_MASK;
			}
#endif
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
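
/* The max_reclassify_loop bound above caps how many times one packet
 * may re-enter the chain via TC_ACT_RECLASSIFY or goto_chain before it
 * is dropped with TC_ACT_SHOT, so a misconfigured rule set cannot spin
 * a packet forever in softirq context.
 */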
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}
static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}
static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}
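
/* Note the lose-and-reuse semantics above: if a concurrent writer
 * installed a proto at the same prio while the chain lock was dropped,
 * the freshly created tp_new is destroyed and the existing proto is
 * returned, so both racing requests end up operating on one tcf_proto
 * instance.
 */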
static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * recently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
					  protocol, prio, chain, rtnl_held,
					  extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      rtnl_held, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, rtnl_held);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 rtnl_held, extack);
		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
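
/* Deletion thus takes three shapes keyed off the netlink header:
 * prio == 0 flushes the whole chain, tcm_handle == 0 removes a single
 * tcf_proto, and a non-zero handle delegates to the classifier's
 * ->delete() for one individual filter.
 */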
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
	 * unlocked, classifier type is not specified, classifier is not
	 * unlocked.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER, true);
}
static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, true) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}
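
/* cb->args[] is the dump cursor carried across netlink callbacks:
 * args[0] holds the flat filter index, args[1] the per-proto skip count
 * (offset by one so that zero means "emit the proto header first"), and
 * args[2] an opaque cookie for the classifier's walker.
 */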
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     NULL, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}
static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	void *tmplt_priv;

	/* If kind is not set, user did not specify template. */
	if (!tca[TCA_KIND])
		return 0;

	ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		module_put(ops->owner);
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}
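
/* Illustrative sketch (assumed, not from this file): the three template
 * callbacks required above are provided together in a classifier's
 * tcf_proto_ops, e.g. for the hypothetical "foo" classifier:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.tmplt_create	= foo_tmplt_create,
 *		.tmplt_destroy	= foo_tmplt_destroy,
 *		.tmplt_dump	= foo_tmplt_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * tc_chain_tmplt_add() rejects a template request unless all three are set.
 */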

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, there is no work to do for us. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}

/* Add/delete/get a filter chain */

static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear
		 * at the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, true);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_flags, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}
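
/* Usage sketch (illustrative, iproute2 syntax): the message types handled
 * above correspond to tc(8) chain commands, e.g.
 *
 *	tc chain add dev eth0 ingress chain 1 protocol ip flower
 *	tc chain del dev eth0 ingress chain 1
 *	tc chain show dev eth0 ingress
 *
 * The add form with a classifier spec also installs a chain template via
 * tc_chain_tmplt_add().
 */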

/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, rtnl_held,
						extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, &attr_size,
					      rtnl_held, extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);
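
/* Illustrative sketch (assumed, not from this file): classifiers typically
 * call tcf_exts_validate() from their ->change() callback to parse the
 * action attributes of a new or replaced filter, roughly:
 *
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
 *	if (err < 0)
 *		return err;
 *
 * where tb[] holds the classifier's parsed nested attributes, est is the
 * optional rate estimator attribute and f is the classifier's private
 * filter object.
 */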

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}
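
/* Worked example (illustrative): when a filter is offloaded to a block
 * shared by three capable devices, __tc_setup_cb_call() returns
 * ok_count == 3, so the add path stores *cnt = 3 while setting
 * TCA_CLS_FLAGS_IN_HW and bumping block->offloadcnt exactly once. Removing
 * the filter resets *cnt to 0 and drops the flag and counter exactly once
 * as well, regardless of how many callbacks had accepted it.
 */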

static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
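
/* Illustrative sketch (assumed, not from this file): this variant is meant
 * for calls that do not change offload counters, e.g. a flower-style
 * hardware stats query:
 *
 *	struct flow_cls_offload cls_flower = {};
 *
 *	cls_flower.command = FLOW_CLS_STATS;
 *	cls_flower.cookie = (unsigned long)f;
 *	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
 *			 rtnl_held);
 *
 * with f standing in for the classifier's private filter object.
 */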

/* Non-destructive filter add. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offloads counter. On failure,
 * previously offloaded filter is considered to be intact and offloads counter
 * is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_add);
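
/* Illustrative sketch (assumed, not from this file): a classifier offloading
 * a new filter wraps its hardware descriptor and calls, roughly:
 *
 *	cls_flower.command = FLOW_CLS_REPLACE;
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 *
 * where skip_sw reflects TCA_CLS_FLAGS_SKIP_SW, making a hardware failure
 * fatal through the err_stop semantics above.
 */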

/* Destructive filter replace. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offload counter. On failure,
 * previously offloaded filter is considered to be destroyed and offload
 * counter is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_replace);

/* Destroy filter and decrement block offload counter, if filter was previously
 * offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
		return 0;
	}

	tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, add);
	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);
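
/* Illustrative sketch (assumed, not from this file): when a callback binds to
 * or unbinds from an already populated block, the classifier's ->reoffload()
 * handler replays every filter to just that callback, roughly:
 *
 *	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSFLOWER,
 *				    &cls_flower, cb_priv, &f->flags,
 *				    &f->in_hw_count);
 *
 * so the in_hw counters stay consistent with the new set of callbacks.
 */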

void tc_cleanup_flow_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action)
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
}
EXPORT_SYMBOL(tc_cleanup_flow_action);

static void tcf_mirred_get_dev(struct flow_action_entry *entry,
			       const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
#endif
}

static void tcf_tunnel_encap_put_tunnel(void *priv)
{
	struct ip_tunnel_info *tunnel = priv;

	kfree(tunnel);
}

static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->tunnel = tcf_tunnel_info_copy(act);
	if (!entry->tunnel)
		return -ENOMEM;
	entry->destructor = tcf_tunnel_encap_put_tunnel;
	entry->destructor_priv = entry->tunnel;
	return 0;
}

static void tcf_sample_get_group(struct flow_action_entry *entry,
				 const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->sample.psample_group =
		act->ops->get_psample_group(act, &entry->destructor);
	entry->destructor_priv = entry->sample.psample_group;
#endif
}

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts, bool rtnl_held)
{
	const struct tc_action *act;
	int i, j, k, err = 0;

	if (!exts)
		return 0;

	if (!rtnl_held)
		rtnl_lock();

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			err = tcf_tunnel_encap_get_tunnel(entry, act);
			if (err)
				goto err_out;
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else if (is_tcf_pedit(act)) {
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					err = -EOPNOTSUPP;
					goto err_out;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_sample(act)) {
			entry->id = FLOW_ACTION_SAMPLE;
			entry->sample.trunc_size = tcf_sample_trunc_size(act);
			entry->sample.truncate = tcf_sample_truncate(act);
			entry->sample.rate = tcf_sample_rate(act);
			tcf_sample_get_group(entry, act);
		} else if (is_tcf_police(act)) {
			entry->id = FLOW_ACTION_POLICE;
			entry->police.burst = tcf_police_tcfp_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
		} else if (is_tcf_ct(act)) {
			entry->id = FLOW_ACTION_CT;
			entry->ct.action = tcf_ct_action(act);
			entry->ct.zone = tcf_ct_zone(act);
		} else if (is_tcf_mpls(act)) {
			switch (tcf_mpls_action(act)) {
			case TCA_MPLS_ACT_PUSH:
				entry->id = FLOW_ACTION_MPLS_PUSH;
				entry->mpls_push.proto = tcf_mpls_proto(act);
				entry->mpls_push.label = tcf_mpls_label(act);
				entry->mpls_push.tc = tcf_mpls_tc(act);
				entry->mpls_push.bos = tcf_mpls_bos(act);
				entry->mpls_push.ttl = tcf_mpls_ttl(act);
				break;
			case TCA_MPLS_ACT_POP:
				entry->id = FLOW_ACTION_MPLS_POP;
				entry->mpls_pop.proto = tcf_mpls_proto(act);
				break;
			case TCA_MPLS_ACT_MODIFY:
				entry->id = FLOW_ACTION_MPLS_MANGLE;
				entry->mpls_mangle.label = tcf_mpls_label(act);
				entry->mpls_mangle.tc = tcf_mpls_tc(act);
				entry->mpls_mangle.bos = tcf_mpls_bos(act);
				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out;
			}
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else {
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (!is_tcf_pedit(act))
			j++;
	}

err_out:
	if (!rtnl_held)
		rtnl_unlock();

	if (err)
		tc_cleanup_flow_action(flow_action);

	return err;
}
EXPORT_SYMBOL(tc_setup_flow_action);
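
/* Illustrative sketch (assumed, not from this file): offload paths translate
 * a filter's actions into the flow_action array of a flow_rule before
 * handing it to drivers, roughly:
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_flow_action(&rule->action, &f->exts, rtnl_held);
 *
 * mirroring how cls_flower builds the rule inside struct flow_cls_offload.
 */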

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);
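
/* Worked example (illustrative): a filter whose exts carry one pedit action
 * with three keys plus one gact drop action needs 3 + 1 = 4 flow_action
 * entries, matching the per-key expansion done in tc_setup_flow_action().
 */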

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static struct flow_indr_block_ing_entry block_ing_entry = {
	.cb = tc_indr_block_get_and_ing_cmd,
	.list = LIST_HEAD_INIT(block_ing_entry.list),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	flow_indr_add_block_ing_cb(&block_ing_entry);

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);