// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find a classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
        const struct tcf_proto_ops *t, *res = NULL;

        if (kind) {
                read_lock(&cls_mod_lock);
                list_for_each_entry(t, &tcf_proto_base, head) {
                        if (strcmp(kind, t->kind) == 0) {
                                if (try_module_get(t->owner))
                                        res = t;
                                break;
                        }
                }
                read_unlock(&cls_mod_lock);
        }
        return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
                     struct netlink_ext_ack *extack)
{
        const struct tcf_proto_ops *ops;

        ops = __tcf_proto_lookup_ops(kind);
        if (ops)
                return ops;
#ifdef CONFIG_MODULES
        if (rtnl_held)
                rtnl_unlock();
        request_module("cls_%s", kind);
        if (rtnl_held)
                rtnl_lock();
        ops = __tcf_proto_lookup_ops(kind);
        /* We dropped the RTNL semaphore in order to perform
         * the module load. So, even if we succeeded in loading
         * the module we have to replay the request. We indicate
         * this using -EAGAIN.
         */
        if (ops) {
                module_put(ops->owner);
                return ERR_PTR(-EAGAIN);
        }
#endif
        NL_SET_ERR_MSG(extack, "TC classifier not found");
        return ERR_PTR(-ENOENT);
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -EEXIST;

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head)
                if (!strcmp(ops->kind, t->kind))
                        goto out;

        list_add_tail(&ops->head, &tcf_proto_base);
        rc = 0;
out:
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -ENOENT;

        /* Wait for outstanding call_rcu()s, if any, from a
         * tcf_proto_ops's destroy() handler.
         */
        rcu_barrier();
        flush_workqueue(tc_filter_wq);

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
                if (t == ops) {
                        list_del(&t->head);
                        rc = 0;
                        break;
                }
        }
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
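
/* Usage sketch (hypothetical, not part of this file): a classifier module
 * normally registers its ops from module init and unregisters them on exit.
 * "foo" and the foo_* handlers are illustrative names only.
 *
 *      static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *              .kind           = "foo",
 *              .owner          = THIS_MODULE,
 *              .classify       = foo_classify,
 *              .init           = foo_init,
 *              .destroy        = foo_destroy,
 *              .get            = foo_get,
 *              .change         = foo_change,
 *              .delete         = foo_delete,
 *              .walk           = foo_walk,
 *              .dump           = foo_dump,
 *      };
 *
 *      static int __init cls_foo_init(void)
 *      {
 *              return register_tcf_proto_ops(&cls_foo_ops);
 *      }
 *
 *      static void __exit cls_foo_exit(void)
 *      {
 *              unregister_tcf_proto_ops(&cls_foo_ops);
 *      }
 */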

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
        INIT_RCU_WORK(rwork, func);
        return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
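
/* Usage sketch (hypothetical): a classifier defers freeing of a filter past
 * an RCU grace period by embedding a struct rcu_work in it and queueing the
 * free from its ->delete()/->destroy() path. foo_filter is illustrative.
 *
 *      struct foo_filter {
 *              struct rcu_work rwork;
 *              u32 handle;
 *      };
 *
 *      static void foo_destroy_filter_work(struct work_struct *work)
 *      {
 *              struct foo_filter *f = container_of(to_rcu_work(work),
 *                                                  struct foo_filter, rwork);
 *
 *              kfree(f);
 *      }
 *
 *      tcf_queue_work(&f->rwork, foo_destroy_filter_work);
 */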

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
        u32 first = TC_H_MAKE(0xC0000000U, 0U);

        if (tp)
                first = tp->prio - 1;

        return TC_H_MAJ(first);
}
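
/* Worked example: with no filters installed (tp == NULL) the first
 * auto-selected value is TC_H_MAJ(0xC0000000) == 0xC0000000, i.e. priority
 * 0xC000 (49152) in the upper 16 bits. If the lowest-priority filter sits
 * at 0xC0000000, the next auto-selected value is TC_H_MAJ(0xC0000000 - 1) ==
 * 0xBFFF0000 (priority 49151), so kernel-chosen priorities count downwards.
 */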

static bool tcf_proto_is_unlocked(const char *kind)
{
        const struct tcf_proto_ops *ops;
        bool ret;

        ops = tcf_proto_lookup_ops(kind, false, NULL);
        /* On error, return false to take the rtnl lock. Proto lookup/create
         * functions will perform the lookup again and handle errors properly.
         */
        if (IS_ERR(ops))
                return false;

        ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
        module_put(ops->owner);
        return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
                                          u32 prio, struct tcf_chain *chain,
                                          bool rtnl_held,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_proto *tp;
        int err;

        tp = kzalloc(sizeof(*tp), GFP_KERNEL);
        if (!tp)
                return ERR_PTR(-ENOBUFS);

        tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
        if (IS_ERR(tp->ops)) {
                err = PTR_ERR(tp->ops);
                goto errout;
        }
        tp->classify = tp->ops->classify;
        tp->protocol = protocol;
        tp->prio = prio;
        tp->chain = chain;
        spin_lock_init(&tp->lock);
        refcount_set(&tp->refcnt, 1);

        err = tp->ops->init(tp);
        if (err) {
                module_put(tp->ops->owner);
                goto errout;
        }
        return tp;

errout:
        kfree(tp);
        return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
        refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
                              struct netlink_ext_ack *extack)
{
        tp->ops->destroy(tp, rtnl_held, extack);
        tcf_chain_put(tp->chain);
        module_put(tp->ops->owner);
        kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
                          struct netlink_ext_ack *extack)
{
        if (refcount_dec_and_test(&tp->refcnt))
                tcf_proto_destroy(tp, rtnl_held, extack);
}

static int walker_check_empty(struct tcf_proto *tp, void *fh,
                              struct tcf_walker *arg)
{
        if (fh) {
                arg->nonempty = true;
                return -1;
        }
        return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
        struct tcf_walker walker = { .fn = walker_check_empty, };

        if (tp->ops->walk) {
                tp->ops->walk(tp, &walker, rtnl_held);
                return !walker.nonempty;
        }
        return true;
}

static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
        spin_lock(&tp->lock);
        if (tcf_proto_is_empty(tp, rtnl_held))
                tp->deleting = true;
        spin_unlock(&tp->lock);
        return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
        spin_lock(&tp->lock);
        tp->deleting = true;
        spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
        bool deleting;

        spin_lock(&tp->lock);
        deleting = tp->deleting;
        spin_unlock(&tp->lock);

        return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)                                      \
        lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
        struct list_head list;
        tcf_chain_head_change_t *chain_head_change;
        void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        chain = kzalloc(sizeof(*chain), GFP_KERNEL);
        if (!chain)
                return NULL;
        list_add_tail(&chain->list, &block->chain_list);
        mutex_init(&chain->filter_chain_lock);
        chain->block = block;
        chain->index = chain_index;
        chain->refcnt = 1;
        if (!chain->index)
                block->chain0.chain = chain;
        return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
                                       struct tcf_proto *tp_head)
{
        if (item->chain_head_change)
                item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
                                   struct tcf_proto *tp_head)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_block *block = chain->block;

        if (chain->index)
                return;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list)
                tcf_chain_head_change_item(item, tp_head);
        mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
        struct tcf_block *block = chain->block;

        ASSERT_BLOCK_LOCKED(block);

        list_del(&chain->list);
        if (!chain->index)
                block->chain0.chain = NULL;

        if (list_empty(&block->chain_list) &&
            refcount_read(&block->refcnt) == 0)
                return true;

        return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
        mutex_destroy(&block->lock);
        kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
        struct tcf_block *block = chain->block;

        mutex_destroy(&chain->filter_chain_lock);
        kfree_rcu(chain, rcu);
        if (free_block)
                tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        ++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        /* In case all the references are action references, this
         * chain should not be shown to the user.
         */
        return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        list_for_each_entry(chain, &block->chain_list, list) {
                if (chain->index == chain_index)
                        return chain;
        }
        return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
                           u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
                                         u32 chain_index, bool create,
                                         bool by_act)
{
        struct tcf_chain *chain = NULL;
        bool is_first_reference;

        mutex_lock(&block->lock);
        chain = tcf_chain_lookup(block, chain_index);
        if (chain) {
                tcf_chain_hold(chain);
        } else {
                if (!create)
                        goto errout;
                chain = tcf_chain_create(block, chain_index);
                if (!chain)
                        goto errout;
        }

        if (by_act)
                ++chain->action_refcnt;
        is_first_reference = chain->refcnt - chain->action_refcnt == 1;
        mutex_unlock(&block->lock);

        /* Send notification only in case we got the first
         * non-action reference. Until then, the chain acts only as
         * a placeholder for actions pointing to it and the user ought
         * not to know about them.
         */
        if (is_first_reference && !by_act)
                tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
                                RTM_NEWCHAIN, false);

        return chain;

errout:
        mutex_unlock(&block->lock);
        return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                       bool create)
{
        return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
        return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
                               void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
                                  void *tmplt_priv, u32 chain_index,
                                  struct tcf_block *block, struct sk_buff *oskb,
                                  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
                            bool explicitly_created)
{
        struct tcf_block *block = chain->block;
        const struct tcf_proto_ops *tmplt_ops;
        bool free_block = false;
        unsigned int refcnt;
        void *tmplt_priv;

        mutex_lock(&block->lock);
        if (explicitly_created) {
                if (!chain->explicitly_created) {
                        mutex_unlock(&block->lock);
                        return;
                }
                chain->explicitly_created = false;
        }

        if (by_act)
                chain->action_refcnt--;

        /* tc_chain_notify_delete can't be called while holding the block
         * lock. However, when the block is unlocked the chain can be changed
         * concurrently, so save these to temporary variables.
         */
        refcnt = --chain->refcnt;
        tmplt_ops = chain->tmplt_ops;
        tmplt_priv = chain->tmplt_priv;

        /* The last dropped non-action reference will trigger notification. */
        if (refcnt - chain->action_refcnt == 0 && !by_act) {
                tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
                                       block, NULL, 0, 0, false);
                /* Last reference to chain, no need to lock. */
                chain->flushing = false;
        }

        if (refcnt == 0)
                free_block = tcf_chain_detach(chain);
        mutex_unlock(&block->lock);

        if (refcnt == 0) {
                tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
                tcf_chain_destroy(chain, free_block);
        }
}

static void tcf_chain_put(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);
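
/* Usage sketch (hypothetical): action code (e.g. the "goto chain" control)
 * pins a chain with an action reference for as long as the action exists,
 * which keeps the chain alive without making it visible in chain dumps:
 *
 *      chain = tcf_chain_get_by_act(block, chain_index);
 *      if (!chain)
 *              return -ENOMEM;
 *      (action holds and uses the chain here)
 *      tcf_chain_put_by_act(chain);
 */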

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
        struct tcf_proto *tp, *tp_next;

        mutex_lock(&chain->filter_chain_lock);
        tp = tcf_chain_dereference(chain->filter_chain, chain);
        RCU_INIT_POINTER(chain->filter_chain, NULL);
        tcf_chain0_head_change(chain, NULL);
        chain->flushing = true;
        mutex_unlock(&chain->filter_chain_lock);

        while (tp) {
                tp_next = rcu_dereference_protected(tp->next, 1);
                tcf_proto_put(tp, rtnl_held, NULL);
                tp = tp_next;
        }
}

static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
        const struct Qdisc_class_ops *cops;
        struct Qdisc *qdisc;

        if (!dev_ingress_queue(dev))
                return NULL;

        qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
        if (!qdisc)
                return NULL;

        cops = qdisc->ops->cl_ops;
        if (!cops)
                return NULL;

        if (!cops->tcf_block)
                return NULL;

        return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}

static struct rhashtable indr_setup_block_ht;

struct tc_indr_block_dev {
        struct rhash_head ht_node;
        struct net_device *dev;
        unsigned int refcnt;
        struct list_head cb_list;
        struct tcf_block *block;
};

struct tc_indr_block_cb {
        struct list_head list;
        void *cb_priv;
        tc_indr_block_bind_cb_t *cb;
        void *cb_ident;
};

static const struct rhashtable_params tc_indr_setup_block_ht_params = {
        .key_offset     = offsetof(struct tc_indr_block_dev, dev),
        .head_offset    = offsetof(struct tc_indr_block_dev, ht_node),
        .key_len        = sizeof(struct net_device *),
};

static struct tc_indr_block_dev *
tc_indr_block_dev_lookup(struct net_device *dev)
{
        return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
                                      tc_indr_setup_block_ht_params);
}

static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
{
        struct tc_indr_block_dev *indr_dev;

        indr_dev = tc_indr_block_dev_lookup(dev);
        if (indr_dev)
                goto inc_ref;

        indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
        if (!indr_dev)
                return NULL;

        INIT_LIST_HEAD(&indr_dev->cb_list);
        indr_dev->dev = dev;
        indr_dev->block = tc_dev_ingress_block(dev);
        if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
                                   tc_indr_setup_block_ht_params)) {
                kfree(indr_dev);
                return NULL;
        }

inc_ref:
        indr_dev->refcnt++;
        return indr_dev;
}

static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
{
        if (--indr_dev->refcnt)
                return;

        rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
                               tc_indr_setup_block_ht_params);
        kfree(indr_dev);
}

static struct tc_indr_block_cb *
tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
                        tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        struct tc_indr_block_cb *indr_block_cb;

        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
                if (indr_block_cb->cb == cb &&
                    indr_block_cb->cb_ident == cb_ident)
                        return indr_block_cb;
        return NULL;
}

static struct tc_indr_block_cb *
tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
                     tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        struct tc_indr_block_cb *indr_block_cb;

        indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
        if (indr_block_cb)
                return ERR_PTR(-EEXIST);

        indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
        if (!indr_block_cb)
                return ERR_PTR(-ENOMEM);

        indr_block_cb->cb_priv = cb_priv;
        indr_block_cb->cb = cb;
        indr_block_cb->cb_ident = cb_ident;
        list_add(&indr_block_cb->list, &indr_dev->cb_list);

        return indr_block_cb;
}

static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
{
        list_del(&indr_block_cb->list);
        kfree(indr_block_cb);
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo);

static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
                                  struct tc_indr_block_cb *indr_block_cb,
                                  enum flow_block_command command)
{
        struct flow_block_offload bo = {
                .command        = command,
                .binder_type    = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
                .net            = dev_net(indr_dev->dev),
                .block_shared   = tcf_block_non_null_shared(indr_dev->block),
        };
        INIT_LIST_HEAD(&bo.cb_list);

        if (!indr_dev->block)
                return;

        bo.block = &indr_dev->block->flow_block;

        indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
                          &bo);
        tcf_block_setup(indr_dev->block, &bo);
}

int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
                                tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        struct tc_indr_block_cb *indr_block_cb;
        struct tc_indr_block_dev *indr_dev;
        int err;

        indr_dev = tc_indr_block_dev_get(dev);
        if (!indr_dev)
                return -ENOMEM;

        indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
        err = PTR_ERR_OR_ZERO(indr_block_cb);
        if (err)
                goto err_dev_put;

        tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_BIND);
        return 0;

err_dev_put:
        tc_indr_block_dev_put(indr_dev);
        return err;
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);

int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
                              tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        int err;

        rtnl_lock();
        err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
        rtnl_unlock();

        return err;
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);

void __tc_indr_block_cb_unregister(struct net_device *dev,
                                   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        struct tc_indr_block_cb *indr_block_cb;
        struct tc_indr_block_dev *indr_dev;

        indr_dev = tc_indr_block_dev_lookup(dev);
        if (!indr_dev)
                return;

        indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
        if (!indr_block_cb)
                return;

        /* Send unbind message if required to free any block cbs. */
        tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_UNBIND);
        tc_indr_block_cb_del(indr_block_cb);
        tc_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);

void tc_indr_block_cb_unregister(struct net_device *dev,
                                 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        rtnl_lock();
        __tc_indr_block_cb_unregister(dev, cb, cb_ident);
        rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
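
/* Usage sketch (hypothetical): a driver that wants to offload filters
 * attached to devices it does not own (e.g. tunnel netdevs) registers an
 * indirect block callback when it learns about such a device and removes it
 * on teardown. foo_indr_setup_tc_cb is an illustrative callback name.
 *
 *      err = tc_indr_block_cb_register(netdev, priv,
 *                                      foo_indr_setup_tc_cb, priv);
 *      if (err)
 *              return err;
 *      (later, e.g. on NETDEV_UNREGISTER)
 *      tc_indr_block_cb_unregister(netdev, foo_indr_setup_tc_cb, priv);
 */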

static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
                               struct tcf_block_ext_info *ei,
                               enum flow_block_command command,
                               struct netlink_ext_ack *extack)
{
        struct tc_indr_block_cb *indr_block_cb;
        struct tc_indr_block_dev *indr_dev;
        struct flow_block_offload bo = {
                .command        = command,
                .binder_type    = ei->binder_type,
                .net            = dev_net(dev),
                .block          = &block->flow_block,
                .block_shared   = tcf_block_shared(block),
                .extack         = extack,
        };
        INIT_LIST_HEAD(&bo.cb_list);

        indr_dev = tc_indr_block_dev_lookup(dev);
        if (!indr_dev)
                return;

        indr_dev->block = command == FLOW_BLOCK_BIND ? block : NULL;

        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
                indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
                                  &bo);

        tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
        return block->offloadcnt;
}

static int tcf_block_offload_cmd(struct tcf_block *block,
                                 struct net_device *dev,
                                 struct tcf_block_ext_info *ei,
                                 enum flow_block_command command,
                                 struct netlink_ext_ack *extack)
{
        struct flow_block_offload bo = {};
        int err;

        bo.net = dev_net(dev);
        bo.command = command;
        bo.binder_type = ei->binder_type;
        bo.block = &block->flow_block;
        bo.block_shared = tcf_block_shared(block);
        bo.extack = extack;
        INIT_LIST_HEAD(&bo.cb_list);

        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
        if (err < 0)
                return err;

        return tcf_block_setup(block, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
                                  struct tcf_block_ext_info *ei,
                                  struct netlink_ext_ack *extack)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_inc;

        /* If the tc offload feature is disabled and the block we try to bind
         * to already has some offloaded filters, refuse the bind.
         */
        if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
                NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
                return -EOPNOTSUPP;
        }

        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_inc;
        if (err)
                return err;

        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
        return 0;

no_offload_dev_inc:
        if (tcf_block_offload_in_use(block))
                return -EOPNOTSUPP;
        block->nooffloaddevcnt++;
        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
        return 0;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
                                     struct tcf_block_ext_info *ei)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_dec;
        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_dec;
        return;

no_offload_dev_dec:
        WARN_ON(block->nooffloaddevcnt-- == 0);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
                              struct tcf_block_ext_info *ei,
                              struct netlink_ext_ack *extack)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_chain *chain0;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item) {
                NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
                return -ENOMEM;
        }
        item->chain_head_change = ei->chain_head_change;
        item->chain_head_change_priv = ei->chain_head_change_priv;

        mutex_lock(&block->lock);
        chain0 = block->chain0.chain;
        if (chain0)
                tcf_chain_hold(chain0);
        else
                list_add(&item->list, &block->chain0.filter_chain_list);
        mutex_unlock(&block->lock);

        if (chain0) {
                struct tcf_proto *tp_head;

                mutex_lock(&chain0->filter_chain_lock);

                tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
                if (tp_head)
                        tcf_chain_head_change_item(item, tp_head);

                mutex_lock(&block->lock);
                list_add(&item->list, &block->chain0.filter_chain_list);
                mutex_unlock(&block->lock);

                mutex_unlock(&chain0->filter_chain_lock);
                tcf_chain_put(chain0);
        }

        return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
                              struct tcf_block_ext_info *ei)
{
        struct tcf_filter_chain_list_item *item;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
                if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
                    (item->chain_head_change == ei->chain_head_change &&
                     item->chain_head_change_priv == ei->chain_head_change_priv)) {
                        if (block->chain0.chain)
                                tcf_chain_head_change_item(item, NULL);
                        list_del(&item->list);
                        mutex_unlock(&block->lock);

                        kfree(item);
                        return;
                }
        }
        mutex_unlock(&block->lock);
        WARN_ON(1);
}

struct tcf_net {
        spinlock_t idr_lock; /* Protects idr */
        struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
                            struct netlink_ext_ack *extack)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);
        int err;

        idr_preload(GFP_KERNEL);
        spin_lock(&tn->idr_lock);
        err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
                            GFP_NOWAIT);
        spin_unlock(&tn->idr_lock);
        idr_preload_end();

        return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        spin_lock(&tn->idr_lock);
        idr_remove(&tn->idr, block->index);
        spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block) {
                NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&block->lock);
        flow_block_init(&block->flow_block);
        INIT_LIST_HEAD(&block->chain_list);
        INIT_LIST_HEAD(&block->owner_list);
        INIT_LIST_HEAD(&block->chain0.filter_chain_list);

        refcount_set(&block->refcnt, 1);
        block->net = net;
        block->index = block_index;

        /* Don't store q pointer for blocks which are shared */
        if (!tcf_block_shared(block))
                block->q = q;
        return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
        struct tcf_block *block;

        rcu_read_lock();
        block = tcf_block_lookup(net, block_index);
        if (block && !refcount_inc_not_zero(&block->refcnt))
                block = NULL;
        rcu_read_unlock();

        return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        mutex_lock(&block->lock);
        if (chain)
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);
        else
                chain = list_first_entry_or_null(&block->chain_list,
                                                 struct tcf_chain, list);

        /* skip all action-only chains */
        while (chain && tcf_chain_held_by_acts_only(chain))
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);

        if (chain)
                tcf_chain_hold(chain);
        mutex_unlock(&block->lock);

        return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant of concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

        if (chain)
                tcf_chain_put(chain);

        return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
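
/* Typical iteration (see tcf_block_flush_all_chains() below for an in-tree
 * example): each call drops the reference on the chain passed in and returns
 * the next chain with a reference already taken, so a full walk needs no
 * explicit put:
 *
 *      for (chain = tcf_get_next_chain(block, NULL);
 *           chain;
 *           chain = tcf_get_next_chain(block, chain))
 *              (visit chain)
 */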

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
        u32 prio = 0;

        ASSERT_RTNL();
        mutex_lock(&chain->filter_chain_lock);

        if (!tp) {
                tp = tcf_chain_dereference(chain->filter_chain, chain);
        } else if (tcf_proto_is_deleting(tp)) {
                /* 'deleting' flag is set and chain->filter_chain_lock was
                 * unlocked, which means next pointer could be invalid. Restart
                 * search.
                 */
                prio = tp->prio + 1;
                tp = tcf_chain_dereference(chain->filter_chain, chain);

                for (; tp; tp = tcf_chain_dereference(tp->next, chain))
                        if (!tp->deleting && tp->prio >= prio)
                                break;
        } else {
                tp = tcf_chain_dereference(tp->next, chain);
        }

        if (tp)
                tcf_proto_get(tp);

        mutex_unlock(&chain->filter_chain_lock);

        return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant of concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
                   bool rtnl_held)
{
        struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

        if (tp)
                tcf_proto_put(tp, rtnl_held, NULL);

        return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
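
/* Typical iteration over the classifiers on a chain, mirroring the chain
 * walk above; each call releases the tp passed in and returns the next tp
 * with a reference already taken:
 *
 *      for (tp = tcf_get_next_proto(chain, NULL, true);
 *           tp;
 *           tp = tcf_get_next_proto(chain, tp, true))
 *              (visit tp)
 */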

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
        struct tcf_chain *chain;

        /* Last reference to block. At this point chains cannot be added or
         * removed concurrently.
         */
        for (chain = tcf_get_next_chain(block, NULL);
             chain;
             chain = tcf_get_next_chain(block, chain)) {
                tcf_chain_put_explicitly_created(chain);
                tcf_chain_flush(chain, rtnl_held);
        }
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
                            u32 *parent, int ifindex, bool rtnl_held,
                            struct netlink_ext_ack *extack)
{
        const struct Qdisc_class_ops *cops;
        struct net_device *dev;
        int err = 0;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        rcu_read_lock();

        /* Find link */
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        /* Find qdisc */
        if (!*parent) {
                *q = dev->qdisc;
                *parent = (*q)->handle;
        } else {
                *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
                if (!*q) {
                        NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                        err = -EINVAL;
                        goto errout_rcu;
                }
        }

        *q = qdisc_refcount_inc_nz(*q);
        if (!*q) {
                NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                err = -EINVAL;
                goto errout_rcu;
        }

        /* Is it classful? */
        cops = (*q)->ops->cl_ops;
        if (!cops) {
                NL_SET_ERR_MSG(extack, "Qdisc not classful");
                err = -EINVAL;
                goto errout_qdisc;
        }

        if (!cops->tcf_block) {
                NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
                err = -EOPNOTSUPP;
                goto errout_qdisc;
        }

errout_rcu:
        /* At this point we know that qdisc is not noop_qdisc,
         * which means that qdisc holds a reference to net_device
         * and we hold a reference to qdisc, so it is safe to release
         * rcu read lock.
         */
        rcu_read_unlock();
        return err;

errout_qdisc:
        rcu_read_unlock();

        if (rtnl_held)
                qdisc_put(*q);
        else
                qdisc_put_unlocked(*q);
        *q = NULL;

        return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
                               int ifindex, struct netlink_ext_ack *extack)
{
        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        /* Do we search for a filter attached to a class? */
        if (TC_H_MIN(parent)) {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                *cl = cops->find(q, parent);
                if (*cl == 0) {
                        NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
                        return -ENOENT;
                }
        }

        return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
                                          unsigned long cl, int ifindex,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
                block = tcf_block_refcnt_get(net, block_index);
                if (!block) {
                        NL_SET_ERR_MSG(extack, "Block of given index was not found");
                        return ERR_PTR(-EINVAL);
                }
        } else {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                block = cops->tcf_block(q, cl, extack);
                if (!block)
                        return ERR_PTR(-EINVAL);

                if (tcf_block_shared(block)) {
                        NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
                        return ERR_PTR(-EOPNOTSUPP);
                }

                /* Always take a reference to the block in order to support
                 * execution of the rules-update path of the cls API without
                 * the rtnl lock. The caller must release the block when it is
                 * finished using it. The 'if' branch of this conditional
                 * obtains its reference by calling tcf_block_refcnt_get().
                 */
                refcount_inc(&block->refcnt);
        }

        return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
                            struct tcf_block_ext_info *ei, bool rtnl_held)
{
        if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
                /* Flushing/putting all chains will cause the block to be
                 * deallocated when the last chain is freed. However, if
                 * chain_list is empty, the block has to be manually
                 * deallocated. After the block's reference counter reaches 0,
                 * it is no longer possible to increment it or add new chains
                 * to the block.
                 */
                bool free_block = list_empty(&block->chain_list);

                mutex_unlock(&block->lock);
                if (tcf_block_shared(block))
                        tcf_block_remove(block, block->net);

                if (q)
                        tcf_block_offload_unbind(block, q, ei);

                if (free_block)
                        tcf_block_destroy(block);
                else
                        tcf_block_flush_all_chains(block, rtnl_held);
        } else if (q) {
                tcf_block_offload_unbind(block, q, ei);
        }
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
        __tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
                                        u32 *parent, unsigned long *cl,
                                        int ifindex, u32 block_index,
                                        struct netlink_ext_ack *extack)
{
        struct tcf_block *block;
        int err = 0;

        ASSERT_RTNL();

        err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
        if (err)
                goto errout;

        err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
        if (err)
                goto errout_qdisc;

        block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
        if (IS_ERR(block)) {
                err = PTR_ERR(block);
                goto errout_qdisc;
        }

        return block;

errout_qdisc:
        if (*q)
                qdisc_put(*q);
errout:
        *q = NULL;
        return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
                              bool rtnl_held)
{
        if (!IS_ERR_OR_NULL(block))
                tcf_block_refcnt_put(block, rtnl_held);

        if (q) {
                if (rtnl_held)
                        qdisc_put(q);
                else
                        qdisc_put_unlocked(q);
        }
}

struct tcf_block_owner_item {
        struct list_head list;
        struct Qdisc *q;
        enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
                               struct Qdisc *q,
                               enum flow_block_binder_type binder_type)
{
        if (block->keep_dst &&
            binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
            binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
                netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
        struct tcf_block_owner_item *item;

        block->keep_dst = true;
        list_for_each_entry(item, &block->owner_list, list)
                tcf_block_owner_netif_keep_dst(block, item->q,
                                               item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
                               struct Qdisc *q,
                               enum flow_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item)
                return -ENOMEM;
        item->q = q;
        item->binder_type = binder_type;
        list_add(&item->list, &block->owner_list);
        return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
                                struct Qdisc *q,
                                enum flow_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        list_for_each_entry(item, &block->owner_list, list) {
                if (item->q == q && item->binder_type == binder_type) {
                        list_del(&item->list);
                        kfree(item);
                        return;
                }
        }
        WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
                      struct tcf_block_ext_info *ei,
                      struct netlink_ext_ack *extack)
{
        struct net *net = qdisc_net(q);
        struct tcf_block *block = NULL;
        int err;

        if (ei->block_index)
                /* A non-zero block_index means a shared block is requested */
                block = tcf_block_refcnt_get(net, ei->block_index);

        if (!block) {
                block = tcf_block_create(net, q, ei->block_index, extack);
                if (IS_ERR(block))
                        return PTR_ERR(block);
                if (tcf_block_shared(block)) {
                        err = tcf_block_insert(block, net, extack);
                        if (err)
                                goto err_block_insert;
                }
        }

        err = tcf_block_owner_add(block, q, ei->binder_type);
        if (err)
                goto err_block_owner_add;

        tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

        err = tcf_chain0_head_change_cb_add(block, ei, extack);
        if (err)
                goto err_chain0_head_change_cb_add;

        err = tcf_block_offload_bind(block, q, ei, extack);
        if (err)
                goto err_block_offload_bind;

        *p_block = block;
        return 0;

err_block_offload_bind:
        tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
        tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
        tcf_block_refcnt_put(block, true);
        return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
        struct tcf_proto __rcu **p_filter_chain = priv;

        rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
                  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
                  struct netlink_ext_ack *extack)
{
        struct tcf_block_ext_info ei = {
                .chain_head_change = tcf_chain_head_change_dflt,
                .chain_head_change_priv = p_filter_chain,
        };

        WARN_ON(!p_filter_chain);
        return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
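
/* Usage sketch (hypothetical qdisc): a classful qdisc typically obtains its
 * block in ->init() and releases it with tcf_block_put() (defined below) in
 * ->destroy(); q->block and q->filter_list are illustrative private fields.
 *
 *      err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *      if (err)
 *              return err;
 *      (later, in ->destroy())
 *      tcf_block_put(q->block);
 */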

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei)
{
        if (!block)
                return;
        tcf_chain0_head_change_cb_del(block, ei);
        tcf_block_owner_del(block, q, ei->binder_type);

        __tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
        struct tcf_block_ext_info ei = {0, };

        if (!block)
                return;
        tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
                            void *cb_priv, bool add, bool offload_in_use,
                            struct netlink_ext_ack *extack)
{
        struct tcf_chain *chain, *chain_prev;
        struct tcf_proto *tp, *tp_prev;
        int err;

        for (chain = __tcf_get_next_chain(block, NULL);
             chain;
             chain_prev = chain,
                     chain = __tcf_get_next_chain(block, chain),
                     tcf_chain_put(chain_prev)) {
                for (tp = __tcf_get_next_proto(chain, NULL); tp;
                     tp_prev = tp,
                             tp = __tcf_get_next_proto(chain, tp),
                             tcf_proto_put(tp_prev, true, NULL)) {
                        if (tp->ops->reoffload) {
                                err = tp->ops->reoffload(tp, add, cb, cb_priv,
                                                         extack);
                                if (err && add)
                                        goto err_playback_remove;
                        } else if (add && offload_in_use) {
                                err = -EOPNOTSUPP;
                                NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
                                goto err_playback_remove;
                        }
                }
        }

        return 0;

err_playback_remove:
        tcf_proto_put(tp, true, NULL);
        tcf_chain_put(chain);
        tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
                                    extack);
        return err;
}

static int tcf_block_bind(struct tcf_block *block,
                          struct flow_block_offload *bo)
{
        struct flow_block_cb *block_cb, *next;
        int err, i = 0;

        list_for_each_entry(block_cb, &bo->cb_list, list) {
                err = tcf_block_playback_offloads(block, block_cb->cb,
                                                  block_cb->cb_priv, true,
                                                  tcf_block_offload_in_use(block),
                                                  bo->extack);
                if (err)
                        goto err_unroll;

                i++;
        }
        list_splice(&bo->cb_list, &block->flow_block.cb_list);

        return 0;

err_unroll:
        list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                if (i-- > 0) {
                        list_del(&block_cb->list);
                        tcf_block_playback_offloads(block, block_cb->cb,
                                                    block_cb->cb_priv, false,
                                                    tcf_block_offload_in_use(block),
                                                    NULL);
                }
                flow_block_cb_free(block_cb);
        }

        return err;
}

static void tcf_block_unbind(struct tcf_block *block,
                             struct flow_block_offload *bo)
{
        struct flow_block_cb *block_cb, *next;

        list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                tcf_block_playback_offloads(block, block_cb->cb,
                                            block_cb->cb_priv, false,
                                            tcf_block_offload_in_use(block),
                                            NULL);
                list_del(&block_cb->list);
                flow_block_cb_free(block_cb);
        }
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo)
{
        int err;

        switch (bo->command) {
        case FLOW_BLOCK_BIND:
                err = tcf_block_bind(block, bo);
                break;
        case FLOW_BLOCK_UNBIND:
                err = 0;
                tcf_block_unbind(block, bo);
                break;
        default:
                WARN_ON_ONCE(1);
                err = -EOPNOTSUPP;
        }

        return err;
}
1631
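/* Example (illustrative sketch; "foo_*" names are hypothetical): the
 * flow_block_offload handed to tcf_block_setup() is normally populated by a
 * driver's ndo_setup_tc() handler, roughly like
 *
 *	case FLOW_BLOCK_BIND:
 *		block_cb = flow_block_cb_alloc(foo_setup_cb, priv, priv, NULL);
 *		if (IS_ERR(block_cb))
 *			return PTR_ERR(block_cb);
 *		flow_block_cb_add(block_cb, f);
 *		return 0;
 *
 * (the flow_block_cb_alloc() argument list varies slightly across kernel
 * versions). tcf_block_bind() then replays the already-installed filters
 * to the new callback before splicing it onto the block's cb_list.
 */
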
1632 /* Main classifier routine: scans the classifier chain attached
1633  * to this qdisc, (optionally) tests for a protocol match and asks
1634  * the specific classifiers to classify the packet.
1635  */
1636 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1637                  struct tcf_result *res, bool compat_mode)
1638 {
1639 #ifdef CONFIG_NET_CLS_ACT
1640         const int max_reclassify_loop = 4;
1641         const struct tcf_proto *orig_tp = tp;
1642         const struct tcf_proto *first_tp;
1643         int limit = 0;
1644
1645 reclassify:
1646 #endif
1647         for (; tp; tp = rcu_dereference_bh(tp->next)) {
1648                 __be16 protocol = tc_skb_protocol(skb);
1649                 int err;
1650
1651                 if (tp->protocol != protocol &&
1652                     tp->protocol != htons(ETH_P_ALL))
1653                         continue;
1654
1655                 err = tp->classify(skb, tp, res);
1656 #ifdef CONFIG_NET_CLS_ACT
1657                 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1658                         first_tp = orig_tp;
1659                         goto reset;
1660                 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1661                         first_tp = res->goto_tp;
1662                         goto reset;
1663                 }
1664 #endif
1665                 if (err >= 0)
1666                         return err;
1667         }
1668
1669         return TC_ACT_UNSPEC; /* signal: continue lookup */
1670 #ifdef CONFIG_NET_CLS_ACT
1671 reset:
1672         if (unlikely(limit++ >= max_reclassify_loop)) {
1673                 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1674                                        tp->chain->block->index,
1675                                        tp->prio & 0xffff,
1676                                        ntohs(tp->protocol));
1677                 return TC_ACT_SHOT;
1678         }
1679
1680         tp = first_tp;
1681         goto reclassify;
1682 #endif
1683 }
1684 EXPORT_SYMBOL(tcf_classify);
1685
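/* Example (illustrative sketch, not taken from a specific qdisc): a
 * classful qdisc typically calls tcf_classify() from its ->enqueue()
 * path, roughly like
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *
 *	switch (tcf_classify(skb, fl, &res, false)) {
 *	case TC_ACT_SHOT:
 *		... drop the packet
 *	case TC_ACT_UNSPEC:
 *		... no filter matched, fall back to a default class
 *	default:
 *		... res.classid selects the target class
 *	}
 */
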
1686 struct tcf_chain_info {
1687         struct tcf_proto __rcu **pprev;
1688         struct tcf_proto __rcu *next;
1689 };
1690
1691 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1692                                            struct tcf_chain_info *chain_info)
1693 {
1694         return tcf_chain_dereference(*chain_info->pprev, chain);
1695 }
1696
1697 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1698                                struct tcf_chain_info *chain_info,
1699                                struct tcf_proto *tp)
1700 {
1701         if (chain->flushing)
1702                 return -EAGAIN;
1703
1704         if (*chain_info->pprev == chain->filter_chain)
1705                 tcf_chain0_head_change(chain, tp);
1706         tcf_proto_get(tp);
1707         RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1708         rcu_assign_pointer(*chain_info->pprev, tp);
1709
1710         return 0;
1711 }
1712
1713 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1714                                 struct tcf_chain_info *chain_info,
1715                                 struct tcf_proto *tp)
1716 {
1717         struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1718
1719         tcf_proto_mark_delete(tp);
1720         if (tp == chain->filter_chain)
1721                 tcf_chain0_head_change(chain, next);
1722         RCU_INIT_POINTER(*chain_info->pprev, next);
1723 }
1724
1725 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1726                                            struct tcf_chain_info *chain_info,
1727                                            u32 protocol, u32 prio,
1728                                            bool prio_allocate);
1729
1730 /* Try to insert new proto.
1731  * If proto with specified priority already exists, free new proto
1732  * and return existing one.
1733  */
1734
1735 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1736                                                     struct tcf_proto *tp_new,
1737                                                     u32 protocol, u32 prio,
1738                                                     bool rtnl_held)
1739 {
1740         struct tcf_chain_info chain_info;
1741         struct tcf_proto *tp;
1742         int err = 0;
1743
1744         mutex_lock(&chain->filter_chain_lock);
1745
1746         tp = tcf_chain_tp_find(chain, &chain_info,
1747                                protocol, prio, false);
1748         if (!tp)
1749                 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1750         mutex_unlock(&chain->filter_chain_lock);
1751
1752         if (tp) {
1753                 tcf_proto_destroy(tp_new, rtnl_held, NULL);
1754                 tp_new = tp;
1755         } else if (err) {
1756                 tcf_proto_destroy(tp_new, rtnl_held, NULL);
1757                 tp_new = ERR_PTR(err);
1758         }
1759
1760         return tp_new;
1761 }
1762
1763 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1764                                       struct tcf_proto *tp, bool rtnl_held,
1765                                       struct netlink_ext_ack *extack)
1766 {
1767         struct tcf_chain_info chain_info;
1768         struct tcf_proto *tp_iter;
1769         struct tcf_proto **pprev;
1770         struct tcf_proto *next;
1771
1772         mutex_lock(&chain->filter_chain_lock);
1773
1774         /* Atomically find and remove tp from chain. */
1775         for (pprev = &chain->filter_chain;
1776              (tp_iter = tcf_chain_dereference(*pprev, chain));
1777              pprev = &tp_iter->next) {
1778                 if (tp_iter == tp) {
1779                         chain_info.pprev = pprev;
1780                         chain_info.next = tp_iter->next;
1781                         WARN_ON(tp_iter->deleting);
1782                         break;
1783                 }
1784         }
1785         /* Verify that tp still exists and no new filters were inserted
1786          * concurrently.
1787          * Mark tp for deletion if it is empty.
1788          */
1789         if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
1790                 mutex_unlock(&chain->filter_chain_lock);
1791                 return;
1792         }
1793
1794         next = tcf_chain_dereference(chain_info.next, chain);
1795         if (tp == chain->filter_chain)
1796                 tcf_chain0_head_change(chain, next);
1797         RCU_INIT_POINTER(*chain_info.pprev, next);
1798         mutex_unlock(&chain->filter_chain_lock);
1799
1800         tcf_proto_put(tp, rtnl_held, extack);
1801 }
1802
1803 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1804                                            struct tcf_chain_info *chain_info,
1805                                            u32 protocol, u32 prio,
1806                                            bool prio_allocate)
1807 {
1808         struct tcf_proto **pprev;
1809         struct tcf_proto *tp;
1810
1811         /* Check the chain for existence of proto-tcf with this priority */
1812         for (pprev = &chain->filter_chain;
1813              (tp = tcf_chain_dereference(*pprev, chain));
1814              pprev = &tp->next) {
1815                 if (tp->prio >= prio) {
1816                         if (tp->prio == prio) {
1817                                 if (prio_allocate ||
1818                                     (tp->protocol != protocol && protocol))
1819                                         return ERR_PTR(-EINVAL);
1820                         } else {
1821                                 tp = NULL;
1822                         }
1823                         break;
1824                 }
1825         }
1826         chain_info->pprev = pprev;
1827         if (tp) {
1828                 chain_info->next = tp->next;
1829                 tcf_proto_get(tp);
1830         } else {
1831                 chain_info->next = NULL;
1832         }
1833         return tp;
1834 }
1835
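/* Worked example for tcf_chain_tp_find() above: chains are kept sorted by
 * ascending prio. With protos at prio A < B on the chain, a lookup for a
 * prio P with A < P < B stops at the prio-B node and returns NULL, leaving
 * chain_info->pprev at the slot where a prio-P proto would be linked. On an
 * exact prio match the protocol must also match (or be zero), and asking
 * for an already-used prio with prio_allocate set yields -EINVAL.
 */
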
1836 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1837                          struct tcf_proto *tp, struct tcf_block *block,
1838                          struct Qdisc *q, u32 parent, void *fh,
1839                          u32 portid, u32 seq, u16 flags, int event,
1840                          bool rtnl_held)
1841 {
1842         struct tcmsg *tcm;
1843         struct nlmsghdr  *nlh;
1844         unsigned char *b = skb_tail_pointer(skb);
1845
1846         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1847         if (!nlh)
1848                 goto out_nlmsg_trim;
1849         tcm = nlmsg_data(nlh);
1850         tcm->tcm_family = AF_UNSPEC;
1851         tcm->tcm__pad1 = 0;
1852         tcm->tcm__pad2 = 0;
1853         if (q) {
1854                 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1855                 tcm->tcm_parent = parent;
1856         } else {
1857                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1858                 tcm->tcm_block_index = block->index;
1859         }
1860         tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1861         if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1862                 goto nla_put_failure;
1863         if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1864                 goto nla_put_failure;
1865         if (!fh) {
1866                 tcm->tcm_handle = 0;
1867         } else {
1868                 if (tp->ops->dump &&
1869                     tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1870                         goto nla_put_failure;
1871         }
1872         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1873         return skb->len;
1874
1875 out_nlmsg_trim:
1876 nla_put_failure:
1877         nlmsg_trim(skb, b);
1878         return -1;
1879 }
1880
1881 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1882                           struct nlmsghdr *n, struct tcf_proto *tp,
1883                           struct tcf_block *block, struct Qdisc *q,
1884                           u32 parent, void *fh, int event, bool unicast,
1885                           bool rtnl_held)
1886 {
1887         struct sk_buff *skb;
1888         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1889         int err = 0;
1890
1891         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1892         if (!skb)
1893                 return -ENOBUFS;
1894
1895         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1896                           n->nlmsg_seq, n->nlmsg_flags, event,
1897                           rtnl_held) <= 0) {
1898                 kfree_skb(skb);
1899                 return -EINVAL;
1900         }
1901
1902         if (unicast)
1903                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1904         else
1905                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1906                                      n->nlmsg_flags & NLM_F_ECHO);
1907
1908         if (err > 0)
1909                 err = 0;
1910         return err;
1911 }
1912
1913 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1914                               struct nlmsghdr *n, struct tcf_proto *tp,
1915                               struct tcf_block *block, struct Qdisc *q,
1916                               u32 parent, void *fh, bool unicast, bool *last,
1917                               bool rtnl_held, struct netlink_ext_ack *extack)
1918 {
1919         struct sk_buff *skb;
1920         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1921         int err;
1922
1923         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1924         if (!skb)
1925                 return -ENOBUFS;
1926
1927         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1928                           n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1929                           rtnl_held) <= 0) {
1930                 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1931                 kfree_skb(skb);
1932                 return -EINVAL;
1933         }
1934
1935         err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1936         if (err) {
1937                 kfree_skb(skb);
1938                 return err;
1939         }
1940
1941         if (unicast)
1942                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1943         else
1944                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1945                                      n->nlmsg_flags & NLM_F_ECHO);
1946         if (err < 0)
1947                 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1948
1949         if (err > 0)
1950                 err = 0;
1951         return err;
1952 }
1953
1954 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1955                                  struct tcf_block *block, struct Qdisc *q,
1956                                  u32 parent, struct nlmsghdr *n,
1957                                  struct tcf_chain *chain, int event,
1958                                  bool rtnl_held)
1959 {
1960         struct tcf_proto *tp;
1961
1962         for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1963              tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
1964                 tfilter_notify(net, oskb, n, tp, block,
1965                                q, parent, NULL, event, false, rtnl_held);
1966 }
1967
1968 static void tfilter_put(struct tcf_proto *tp, void *fh)
1969 {
1970         if (tp->ops->put && fh)
1971                 tp->ops->put(tp, fh);
1972 }
1973
1974 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1975                           struct netlink_ext_ack *extack)
1976 {
1977         struct net *net = sock_net(skb->sk);
1978         struct nlattr *tca[TCA_MAX + 1];
1979         struct tcmsg *t;
1980         u32 protocol;
1981         u32 prio;
1982         bool prio_allocate;
1983         u32 parent;
1984         u32 chain_index;
1985         struct Qdisc *q = NULL;
1986         struct tcf_chain_info chain_info;
1987         struct tcf_chain *chain = NULL;
1988         struct tcf_block *block;
1989         struct tcf_proto *tp;
1990         unsigned long cl;
1991         void *fh;
1992         int err;
1993         int tp_created;
1994         bool rtnl_held = false;
1995
1996         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1997                 return -EPERM;
1998
1999 replay:
2000         tp_created = 0;
2001
2002         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2003                                      rtm_tca_policy, extack);
2004         if (err < 0)
2005                 return err;
2006
2007         t = nlmsg_data(n);
2008         protocol = TC_H_MIN(t->tcm_info);
2009         prio = TC_H_MAJ(t->tcm_info);
2010         prio_allocate = false;
2011         parent = t->tcm_parent;
2012         tp = NULL;
2013         cl = 0;
2014         block = NULL;
2015
2016         if (prio == 0) {
2017                 /* If no priority is provided by the user,
2018                  * we allocate one.
2019                  */
2020                 if (n->nlmsg_flags & NLM_F_CREATE) {
2021                         prio = TC_H_MAKE(0x80000000U, 0U);
2022                         prio_allocate = true;
2023                 } else {
2024                         NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2025                         return -ENOENT;
2026                 }
2027         }
2028
2029         /* Find head of filter chain. */
2030
2031         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2032         if (err)
2033                 return err;
2034
2035         /* Take the rtnl mutex if rtnl_held was set on a previous iteration,
2036          * the block is shared (no qdisc found), the qdisc does not run
2037          * unlocked, the classifier type is unspecified, or the classifier cannot run unlocked.
2038          */
2039         if (rtnl_held ||
2040             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2041             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2042                 rtnl_held = true;
2043                 rtnl_lock();
2044         }
2045
2046         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2047         if (err)
2048                 goto errout;
2049
2050         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2051                                  extack);
2052         if (IS_ERR(block)) {
2053                 err = PTR_ERR(block);
2054                 goto errout;
2055         }
2056
2057         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2058         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2059                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2060                 err = -EINVAL;
2061                 goto errout;
2062         }
2063         chain = tcf_chain_get(block, chain_index, true);
2064         if (!chain) {
2065                 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2066                 err = -ENOMEM;
2067                 goto errout;
2068         }
2069
2070         mutex_lock(&chain->filter_chain_lock);
2071         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2072                                prio, prio_allocate);
2073         if (IS_ERR(tp)) {
2074                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2075                 err = PTR_ERR(tp);
2076                 goto errout_locked;
2077         }
2078
2079         if (tp == NULL) {
2080                 struct tcf_proto *tp_new = NULL;
2081
2082                 if (chain->flushing) {
2083                         err = -EAGAIN;
2084                         goto errout_locked;
2085                 }
2086
2087                 /* Proto-tcf does not exist, create new one */
2088
2089                 if (tca[TCA_KIND] == NULL || !protocol) {
2090                         NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2091                         err = -EINVAL;
2092                         goto errout_locked;
2093                 }
2094
2095                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2096                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2097                         err = -ENOENT;
2098                         goto errout_locked;
2099                 }
2100
2101                 if (prio_allocate)
2102                         prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2103                                                                &chain_info));
2104
2105                 mutex_unlock(&chain->filter_chain_lock);
2106                 tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
2107                                           protocol, prio, chain, rtnl_held,
2108                                           extack);
2109                 if (IS_ERR(tp_new)) {
2110                         err = PTR_ERR(tp_new);
2111                         goto errout_tp;
2112                 }
2113
2114                 tp_created = 1;
2115                 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2116                                                 rtnl_held);
2117                 if (IS_ERR(tp)) {
2118                         err = PTR_ERR(tp);
2119                         goto errout_tp;
2120                 }
2121         } else {
2122                 mutex_unlock(&chain->filter_chain_lock);
2123         }
2124
2125         if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2126                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2127                 err = -EINVAL;
2128                 goto errout;
2129         }
2130
2131         fh = tp->ops->get(tp, t->tcm_handle);
2132
2133         if (!fh) {
2134                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2135                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2136                         err = -ENOENT;
2137                         goto errout;
2138                 }
2139         } else if (n->nlmsg_flags & NLM_F_EXCL) {
2140                 tfilter_put(tp, fh);
2141                 NL_SET_ERR_MSG(extack, "Filter already exists");
2142                 err = -EEXIST;
2143                 goto errout;
2144         }
2145
2146         if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2147                 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2148                 err = -EINVAL;
2149                 goto errout;
2150         }
2151
2152         err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2153                               n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2154                               rtnl_held, extack);
2155         if (err == 0) {
2156                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2157                                RTM_NEWTFILTER, false, rtnl_held);
2158                 tfilter_put(tp, fh);
2159                 /* q pointer is NULL for shared blocks */
2160                 if (q)
2161                         q->flags &= ~TCQ_F_CAN_BYPASS;
2162         }
2163
2164 errout:
2165         if (err && tp_created)
2166                 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2167 errout_tp:
2168         if (chain) {
2169                 if (tp && !IS_ERR(tp))
2170                         tcf_proto_put(tp, rtnl_held, NULL);
2171                 if (!tp_created)
2172                         tcf_chain_put(chain);
2173         }
2174         tcf_block_release(q, block, rtnl_held);
2175
2176         if (rtnl_held)
2177                 rtnl_unlock();
2178
2179         if (err == -EAGAIN) {
2180                 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2181                  * of target chain.
2182                  */
2183                 rtnl_held = true;
2184                 /* Replay the request. */
2185                 goto replay;
2186         }
2187         return err;
2188
2189 errout_locked:
2190         mutex_unlock(&chain->filter_chain_lock);
2191         goto errout;
2192 }
2193
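/* Example (illustrative): the RTM_NEWTFILTER handler above is what a
 * command such as
 *
 *	tc filter add dev eth0 ingress protocol ip prio 10 \
 *		flower dst_ip 192.0.2.1 action drop
 *
 * lands in: iproute2 sends RTM_NEWTFILTER with NLM_F_CREATE, "prio" is
 * carried in TC_H_MAJ(tcm_info), "protocol" in TC_H_MIN(tcm_info), and
 * the "flower" kind selects the tcf_proto_ops whose ->change() is invoked.
 */
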
2194 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2195                           struct netlink_ext_ack *extack)
2196 {
2197         struct net *net = sock_net(skb->sk);
2198         struct nlattr *tca[TCA_MAX + 1];
2199         struct tcmsg *t;
2200         u32 protocol;
2201         u32 prio;
2202         u32 parent;
2203         u32 chain_index;
2204         struct Qdisc *q = NULL;
2205         struct tcf_chain_info chain_info;
2206         struct tcf_chain *chain = NULL;
2207         struct tcf_block *block = NULL;
2208         struct tcf_proto *tp = NULL;
2209         unsigned long cl = 0;
2210         void *fh = NULL;
2211         int err;
2212         bool rtnl_held = false;
2213
2214         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2215                 return -EPERM;
2216
2217         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2218                                      rtm_tca_policy, extack);
2219         if (err < 0)
2220                 return err;
2221
2222         t = nlmsg_data(n);
2223         protocol = TC_H_MIN(t->tcm_info);
2224         prio = TC_H_MAJ(t->tcm_info);
2225         parent = t->tcm_parent;
2226
2227         if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2228                 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2229                 return -ENOENT;
2230         }
2231
2232         /* Find head of filter chain. */
2233
2234         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2235         if (err)
2236                 return err;
2237
2238         /* Take the rtnl mutex if flushing the whole chain, the block is
2239          * shared (no qdisc found), the qdisc does not run unlocked, the
2240          * classifier type is unspecified, or the classifier cannot run unlocked.
2241          */
2242         if (!prio ||
2243             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2244             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2245                 rtnl_held = true;
2246                 rtnl_lock();
2247         }
2248
2249         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2250         if (err)
2251                 goto errout;
2252
2253         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2254                                  extack);
2255         if (IS_ERR(block)) {
2256                 err = PTR_ERR(block);
2257                 goto errout;
2258         }
2259
2260         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2261         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2262                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2263                 err = -EINVAL;
2264                 goto errout;
2265         }
2266         chain = tcf_chain_get(block, chain_index, false);
2267         if (!chain) {
2268                 /* User requested flush on non-existent chain. Nothing to do,
2269                  * so just return success.
2270                  */
2271                 if (prio == 0) {
2272                         err = 0;
2273                         goto errout;
2274                 }
2275                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2276                 err = -ENOENT;
2277                 goto errout;
2278         }
2279
2280         if (prio == 0) {
2281                 tfilter_notify_chain(net, skb, block, q, parent, n,
2282                                      chain, RTM_DELTFILTER, rtnl_held);
2283                 tcf_chain_flush(chain, rtnl_held);
2284                 err = 0;
2285                 goto errout;
2286         }
2287
2288         mutex_lock(&chain->filter_chain_lock);
2289         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2290                                prio, false);
2291         if (!tp || IS_ERR(tp)) {
2292                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2293                 err = tp ? PTR_ERR(tp) : -ENOENT;
2294                 goto errout_locked;
2295         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2296                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2297                 err = -EINVAL;
2298                 goto errout_locked;
2299         } else if (t->tcm_handle == 0) {
2300                 tcf_chain_tp_remove(chain, &chain_info, tp);
2301                 mutex_unlock(&chain->filter_chain_lock);
2302
2303                 tcf_proto_put(tp, rtnl_held, NULL);
2304                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2305                                RTM_DELTFILTER, false, rtnl_held);
2306                 err = 0;
2307                 goto errout;
2308         }
2309         mutex_unlock(&chain->filter_chain_lock);
2310
2311         fh = tp->ops->get(tp, t->tcm_handle);
2312
2313         if (!fh) {
2314                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2315                 err = -ENOENT;
2316         } else {
2317                 bool last;
2318
2319                 err = tfilter_del_notify(net, skb, n, tp, block,
2320                                          q, parent, fh, false, &last,
2321                                          rtnl_held, extack);
2322
2323                 if (err)
2324                         goto errout;
2325                 if (last)
2326                         tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2327         }
2328
2329 errout:
2330         if (chain) {
2331                 if (tp && !IS_ERR(tp))
2332                         tcf_proto_put(tp, rtnl_held, NULL);
2333                 tcf_chain_put(chain);
2334         }
2335         tcf_block_release(q, block, rtnl_held);
2336
2337         if (rtnl_held)
2338                 rtnl_unlock();
2339
2340         return err;
2341
2342 errout_locked:
2343         mutex_unlock(&chain->filter_chain_lock);
2344         goto errout;
2345 }
2346
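/* Example (illustrative): with prio == 0 the handler above flushes the
 * whole chain, which is what
 *
 *	tc filter del dev eth0 ingress
 *
 * requests; adding "protocol ip prio 10" narrows the deletion to a single
 * proto, and a "handle" narrows it further to one filter via ->delete().
 */
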
2347 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2348                           struct netlink_ext_ack *extack)
2349 {
2350         struct net *net = sock_net(skb->sk);
2351         struct nlattr *tca[TCA_MAX + 1];
2352         struct tcmsg *t;
2353         u32 protocol;
2354         u32 prio;
2355         u32 parent;
2356         u32 chain_index;
2357         struct Qdisc *q = NULL;
2358         struct tcf_chain_info chain_info;
2359         struct tcf_chain *chain = NULL;
2360         struct tcf_block *block = NULL;
2361         struct tcf_proto *tp = NULL;
2362         unsigned long cl = 0;
2363         void *fh = NULL;
2364         int err;
2365         bool rtnl_held = false;
2366
2367         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2368                                      rtm_tca_policy, extack);
2369         if (err < 0)
2370                 return err;
2371
2372         t = nlmsg_data(n);
2373         protocol = TC_H_MIN(t->tcm_info);
2374         prio = TC_H_MAJ(t->tcm_info);
2375         parent = t->tcm_parent;
2376
2377         if (prio == 0) {
2378                 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2379                 return -ENOENT;
2380         }
2381
2382         /* Find head of filter chain. */
2383
2384         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2385         if (err)
2386                 return err;
2387
2388         /* Take the rtnl mutex if the block is shared (no qdisc found), the
2389          * qdisc does not run unlocked, the classifier type is unspecified,
2390          * or the classifier cannot run unlocked.
2391          */
2392         if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2393             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2394                 rtnl_held = true;
2395                 rtnl_lock();
2396         }
2397
2398         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2399         if (err)
2400                 goto errout;
2401
2402         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2403                                  extack);
2404         if (IS_ERR(block)) {
2405                 err = PTR_ERR(block);
2406                 goto errout;
2407         }
2408
2409         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2410         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2411                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2412                 err = -EINVAL;
2413                 goto errout;
2414         }
2415         chain = tcf_chain_get(block, chain_index, false);
2416         if (!chain) {
2417                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2418                 err = -EINVAL;
2419                 goto errout;
2420         }
2421
2422         mutex_lock(&chain->filter_chain_lock);
2423         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2424                                prio, false);
2425         mutex_unlock(&chain->filter_chain_lock);
2426         if (!tp || IS_ERR(tp)) {
2427                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2428                 err = tp ? PTR_ERR(tp) : -ENOENT;
2429                 goto errout;
2430         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2431                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2432                 err = -EINVAL;
2433                 goto errout;
2434         }
2435
2436         fh = tp->ops->get(tp, t->tcm_handle);
2437
2438         if (!fh) {
2439                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2440                 err = -ENOENT;
2441         } else {
2442                 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2443                                      fh, RTM_NEWTFILTER, true, rtnl_held);
2444                 if (err < 0)
2445                         NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2446         }
2447
2448         tfilter_put(tp, fh);
2449 errout:
2450         if (chain) {
2451                 if (tp && !IS_ERR(tp))
2452                         tcf_proto_put(tp, rtnl_held, NULL);
2453                 tcf_chain_put(chain);
2454         }
2455         tcf_block_release(q, block, rtnl_held);
2456
2457         if (rtnl_held)
2458                 rtnl_unlock();
2459
2460         return err;
2461 }
2462
2463 struct tcf_dump_args {
2464         struct tcf_walker w;
2465         struct sk_buff *skb;
2466         struct netlink_callback *cb;
2467         struct tcf_block *block;
2468         struct Qdisc *q;
2469         u32 parent;
2470 };
2471
2472 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2473 {
2474         struct tcf_dump_args *a = (void *)arg;
2475         struct net *net = sock_net(a->skb->sk);
2476
2477         return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2478                              n, NETLINK_CB(a->cb->skb).portid,
2479                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2480                              RTM_NEWTFILTER, true);
2481 }
2482
2483 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2484                            struct sk_buff *skb, struct netlink_callback *cb,
2485                            long index_start, long *p_index)
2486 {
2487         struct net *net = sock_net(skb->sk);
2488         struct tcf_block *block = chain->block;
2489         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2490         struct tcf_proto *tp, *tp_prev;
2491         struct tcf_dump_args arg;
2492
2493         for (tp = __tcf_get_next_proto(chain, NULL);
2494              tp;
2495              tp_prev = tp,
2496                      tp = __tcf_get_next_proto(chain, tp),
2497                      tcf_proto_put(tp_prev, true, NULL),
2498                      (*p_index)++) {
2499                 if (*p_index < index_start)
2500                         continue;
2501                 if (TC_H_MAJ(tcm->tcm_info) &&
2502                     TC_H_MAJ(tcm->tcm_info) != tp->prio)
2503                         continue;
2504                 if (TC_H_MIN(tcm->tcm_info) &&
2505                     TC_H_MIN(tcm->tcm_info) != tp->protocol)
2506                         continue;
2507                 if (*p_index > index_start)
2508                         memset(&cb->args[1], 0,
2509                                sizeof(cb->args) - sizeof(cb->args[0]));
2510                 if (cb->args[1] == 0) {
2511                         if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2512                                           NETLINK_CB(cb->skb).portid,
2513                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
2514                                           RTM_NEWTFILTER, true) <= 0)
2515                                 goto errout;
2516                         cb->args[1] = 1;
2517                 }
2518                 if (!tp->ops->walk)
2519                         continue;
2520                 arg.w.fn = tcf_node_dump;
2521                 arg.skb = skb;
2522                 arg.cb = cb;
2523                 arg.block = block;
2524                 arg.q = q;
2525                 arg.parent = parent;
2526                 arg.w.stop = 0;
2527                 arg.w.skip = cb->args[1] - 1;
2528                 arg.w.count = 0;
2529                 arg.w.cookie = cb->args[2];
2530                 tp->ops->walk(tp, &arg.w, true);
2531                 cb->args[2] = arg.w.cookie;
2532                 cb->args[1] = arg.w.count + 1;
2533                 if (arg.w.stop)
2534                         goto errout;
2535         }
2536         return true;
2537
2538 errout:
2539         tcf_proto_put(tp, true, NULL);
2540         return false;
2541 }
2542
2543 /* called with RTNL */
2544 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2545 {
2546         struct tcf_chain *chain, *chain_prev;
2547         struct net *net = sock_net(skb->sk);
2548         struct nlattr *tca[TCA_MAX + 1];
2549         struct Qdisc *q = NULL;
2550         struct tcf_block *block;
2551         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2552         long index_start;
2553         long index;
2554         u32 parent;
2555         int err;
2556
2557         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2558                 return skb->len;
2559
2560         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2561                                      NULL, cb->extack);
2562         if (err)
2563                 return err;
2564
2565         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2566                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2567                 if (!block)
2568                         goto out;
2569                 /* If we work with a block index, q is NULL and the parent
2570                  * value will never be used in the following code. The check
2571                  * in tcf_fill_node prevents it. However, the compiler does not
2572                  * see that far, so set parent to zero to silence the warning
2573                  * about parent being uninitialized.
2574                  */
2575                 parent = 0;
2576         } else {
2577                 const struct Qdisc_class_ops *cops;
2578                 struct net_device *dev;
2579                 unsigned long cl = 0;
2580
2581                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2582                 if (!dev)
2583                         return skb->len;
2584
2585                 parent = tcm->tcm_parent;
2586                 if (!parent) {
2587                         q = dev->qdisc;
2588                         parent = q->handle;
2589                 } else {
2590                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2591                 }
2592                 if (!q)
2593                         goto out;
2594                 cops = q->ops->cl_ops;
2595                 if (!cops)
2596                         goto out;
2597                 if (!cops->tcf_block)
2598                         goto out;
2599                 if (TC_H_MIN(tcm->tcm_parent)) {
2600                         cl = cops->find(q, tcm->tcm_parent);
2601                         if (cl == 0)
2602                                 goto out;
2603                 }
2604                 block = cops->tcf_block(q, cl, NULL);
2605                 if (!block)
2606                         goto out;
2607                 if (tcf_block_shared(block))
2608                         q = NULL;
2609         }
2610
2611         index_start = cb->args[0];
2612         index = 0;
2613
2614         for (chain = __tcf_get_next_chain(block, NULL);
2615              chain;
2616              chain_prev = chain,
2617                      chain = __tcf_get_next_chain(block, chain),
2618                      tcf_chain_put(chain_prev)) {
2619                 if (tca[TCA_CHAIN] &&
2620                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2621                         continue;
2622                 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2623                                     index_start, &index)) {
2624                         tcf_chain_put(chain);
2625                         err = -EMSGSIZE;
2626                         break;
2627                 }
2628         }
2629
2630         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2631                 tcf_block_refcnt_put(block, true);
2632         cb->args[0] = index;
2633
2634 out:
2635         /* If we made no progress, the error (EMSGSIZE) is real */
2636         if (skb->len == 0 && err)
2637                 return err;
2638         return skb->len;
2639 }
2640
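/* Example (illustrative): "tc filter show dev eth0 ingress" drives the dump
 * above. The position survives across recvmsg() calls in cb->args[]:
 * args[0] holds the proto index reached so far, args[1] the per-proto skip
 * count for ->walk(), and args[2] an opaque cookie owned by the classifier.
 */
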
2641 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2642                               void *tmplt_priv, u32 chain_index,
2643                               struct net *net, struct sk_buff *skb,
2644                               struct tcf_block *block,
2645                               u32 portid, u32 seq, u16 flags, int event)
2646 {
2647         unsigned char *b = skb_tail_pointer(skb);
2648         const struct tcf_proto_ops *ops;
2649         struct nlmsghdr *nlh;
2650         struct tcmsg *tcm;
2651         void *priv;
2652
2653         ops = tmplt_ops;
2654         priv = tmplt_priv;
2655
2656         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2657         if (!nlh)
2658                 goto out_nlmsg_trim;
2659         tcm = nlmsg_data(nlh);
2660         tcm->tcm_family = AF_UNSPEC;
2661         tcm->tcm__pad1 = 0;
2662         tcm->tcm__pad2 = 0;
2663         tcm->tcm_handle = 0;
2664         if (block->q) {
2665                 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2666                 tcm->tcm_parent = block->q->handle;
2667         } else {
2668                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2669                 tcm->tcm_block_index = block->index;
2670         }
2671
2672         if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2673                 goto nla_put_failure;
2674
2675         if (ops) {
2676                 if (nla_put_string(skb, TCA_KIND, ops->kind))
2677                         goto nla_put_failure;
2678                 if (ops->tmplt_dump(skb, net, priv) < 0)
2679                         goto nla_put_failure;
2680         }
2681
2682         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2683         return skb->len;
2684
2685 out_nlmsg_trim:
2686 nla_put_failure:
2687         nlmsg_trim(skb, b);
2688         return -EMSGSIZE;
2689 }
2690
2691 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2692                            u32 seq, u16 flags, int event, bool unicast)
2693 {
2694         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2695         struct tcf_block *block = chain->block;
2696         struct net *net = block->net;
2697         struct sk_buff *skb;
2698         int err = 0;
2699
2700         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2701         if (!skb)
2702                 return -ENOBUFS;
2703
2704         if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2705                                chain->index, net, skb, block, portid,
2706                                seq, flags, event) <= 0) {
2707                 kfree_skb(skb);
2708                 return -EINVAL;
2709         }
2710
2711         if (unicast)
2712                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2713         else
2714                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2715                                      flags & NLM_F_ECHO);
2716
2717         if (err > 0)
2718                 err = 0;
2719         return err;
2720 }
2721
2722 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2723                                   void *tmplt_priv, u32 chain_index,
2724                                   struct tcf_block *block, struct sk_buff *oskb,
2725                                   u32 seq, u16 flags, bool unicast)
2726 {
2727         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2728         struct net *net = block->net;
2729         struct sk_buff *skb;
2730
2731         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2732         if (!skb)
2733                 return -ENOBUFS;
2734
2735         if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2736                                block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2737                 kfree_skb(skb);
2738                 return -EINVAL;
2739         }
2740
2741         if (unicast)
2742                 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2743
2744         return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2745 }
2746
2747 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2748                               struct nlattr **tca,
2749                               struct netlink_ext_ack *extack)
2750 {
2751         const struct tcf_proto_ops *ops;
2752         void *tmplt_priv;
2753
2754         /* If kind is not set, user did not specify template. */
2755         if (!tca[TCA_KIND])
2756                 return 0;
2757
2758         ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
2759         if (IS_ERR(ops))
2760                 return PTR_ERR(ops);
2761         if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2762                 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
                     /* Drop the module reference taken by tcf_proto_lookup_ops()
                      * above; returning without it leaks the classifier module.
                      */
                     module_put(ops->owner);
2763                 return -EOPNOTSUPP;
2764         }
2765
2766         tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2767         if (IS_ERR(tmplt_priv)) {
2768                 module_put(ops->owner);
2769                 return PTR_ERR(tmplt_priv);
2770         }
2771         chain->tmplt_ops = ops;
2772         chain->tmplt_priv = tmplt_priv;
2773         return 0;
2774 }
2775
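/* Example (illustrative): a chain template is installed by naming a
 * classifier kind when creating the chain, e.g.
 *
 *	tc chain add dev eth0 ingress chain 1 protocol ip \
 *		flower dst_mac 00:11:22:33:44:55
 *
 * which ends up in ops->tmplt_create() above; filters later added to
 * chain 1 must then be of the same kind (enforced in tc_new_tfilter()).
 */
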
2776 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2777                                void *tmplt_priv)
2778 {
2779         /* If template ops are not set, there is no work to do for us. */
2780         if (!tmplt_ops)
2781                 return;
2782
2783         tmplt_ops->tmplt_destroy(tmplt_priv);
2784         module_put(tmplt_ops->owner);
2785 }
2786
2787 /* Add/delete/get a chain */
2788
2789 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2790                         struct netlink_ext_ack *extack)
2791 {
2792         struct net *net = sock_net(skb->sk);
2793         struct nlattr *tca[TCA_MAX + 1];
2794         struct tcmsg *t;
2795         u32 parent;
2796         u32 chain_index;
2797         struct Qdisc *q = NULL;
2798         struct tcf_chain *chain = NULL;
2799         struct tcf_block *block;
2800         unsigned long cl;
2801         int err;
2802
2803         if (n->nlmsg_type != RTM_GETCHAIN &&
2804             !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2805                 return -EPERM;
2806
2807 replay:
2808         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2809                                      rtm_tca_policy, extack);
2810         if (err < 0)
2811                 return err;
2812
2813         t = nlmsg_data(n);
2814         parent = t->tcm_parent;
2815         cl = 0;
2816
2817         block = tcf_block_find(net, &q, &parent, &cl,
2818                                t->tcm_ifindex, t->tcm_block_index, extack);
2819         if (IS_ERR(block))
2820                 return PTR_ERR(block);
2821
2822         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2823         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2824                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2825                 err = -EINVAL;
2826                 goto errout_block;
2827         }
2828
2829         mutex_lock(&block->lock);
2830         chain = tcf_chain_lookup(block, chain_index);
2831         if (n->nlmsg_type == RTM_NEWCHAIN) {
2832                 if (chain) {
2833                         if (tcf_chain_held_by_acts_only(chain)) {
2834                                 /* The chain exists only because there is
2835                                  * some action referencing it.
2836                                  */
2837                                 tcf_chain_hold(chain);
2838                         } else {
2839                                 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2840                                 err = -EEXIST;
2841                                 goto errout_block_locked;
2842                         }
2843                 } else {
2844                         if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2845                                 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2846                                 err = -ENOENT;
2847                                 goto errout_block_locked;
2848                         }
2849                         chain = tcf_chain_create(block, chain_index);
2850                         if (!chain) {
2851                                 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2852                                 err = -ENOMEM;
2853                                 goto errout_block_locked;
2854                         }
2855                 }
2856         } else {
2857                 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2858                         NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2859                         err = -EINVAL;
2860                         goto errout_block_locked;
2861                 }
2862                 tcf_chain_hold(chain);
2863         }
2864
2865         if (n->nlmsg_type == RTM_NEWCHAIN) {
2866                 /* Modifying chain requires holding parent block lock. In case
2867                  * the chain was successfully added, take a reference to the
2868                  * chain. This ensures that an empty chain does not disappear at
2869                  * the end of this function.
2870                  */
2871                 tcf_chain_hold(chain);
2872                 chain->explicitly_created = true;
2873         }
2874         mutex_unlock(&block->lock);
2875
2876         switch (n->nlmsg_type) {
2877         case RTM_NEWCHAIN:
2878                 err = tc_chain_tmplt_add(chain, net, tca, extack);
2879                 if (err) {
2880                         tcf_chain_put_explicitly_created(chain);
2881                         goto errout;
2882                 }
2883
2884                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2885                                 RTM_NEWCHAIN, false);
2886                 break;
2887         case RTM_DELCHAIN:
2888                 tfilter_notify_chain(net, skb, block, q, parent, n,
2889                                      chain, RTM_DELTFILTER, true);
2890                 /* Flush the chain first as the user requested chain removal. */
2891                 tcf_chain_flush(chain, true);
2892                 /* In case the chain was successfully deleted, put a reference
2893                  * to the chain previously taken during addition.
2894                  */
2895                 tcf_chain_put_explicitly_created(chain);
2896                 break;
2897         case RTM_GETCHAIN:
2898                 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2899                                       n->nlmsg_seq, n->nlmsg_type, true);
2900                 if (err < 0)
2901                         NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2902                 break;
2903         default:
2904                 err = -EOPNOTSUPP;
2905                 NL_SET_ERR_MSG(extack, "Unsupported message type");
2906                 goto errout;
2907         }
2908
2909 errout:
2910         tcf_chain_put(chain);
2911 errout_block:
2912         tcf_block_release(q, block, true);
2913         if (err == -EAGAIN)
2914                 /* Replay the request. */
2915                 goto replay;
2916         return err;
2917
2918 errout_block_locked:
2919         mutex_unlock(&block->lock);
2920         goto errout_block;
2921 }
2922
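/* Example (illustrative): RTM_NEWCHAIN, RTM_DELCHAIN and RTM_GETCHAIN map
 * to "tc chain add|del|get dev eth0 ingress chain 1"; deleting a chain
 * first notifies about and flushes its filters, as in the RTM_DELCHAIN
 * case above.
 */
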
2923 /* called with RTNL */
2924 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2925 {
2926         struct net *net = sock_net(skb->sk);
2927         struct nlattr *tca[TCA_MAX + 1];
2928         struct Qdisc *q = NULL;
2929         struct tcf_block *block;
2930         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2931         struct tcf_chain *chain;
2932         long index_start;
2933         long index;
2934         u32 parent;
2935         int err;
2936
2937         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2938                 return skb->len;
2939
2940         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2941                                      rtm_tca_policy, cb->extack);
2942         if (err)
2943                 return err;
2944
2945         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2946                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2947                 if (!block)
2948                         goto out;
2949                 /* If we work with a block index, q is NULL and the parent
2950                  * value will never be used in the following code. The check
2951                  * in tc_chain_fill_node prevents it. However, the compiler does
2952                  * not see that far, so set parent to zero to silence the warning
2953                  * about parent being uninitialized.
2954                  */
2955                 parent = 0;
2956         } else {
2957                 const struct Qdisc_class_ops *cops;
2958                 struct net_device *dev;
2959                 unsigned long cl = 0;
2960
2961                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2962                 if (!dev)
2963                         return skb->len;
2964
2965                 parent = tcm->tcm_parent;
2966                 if (!parent) {
2967                         q = dev->qdisc;
2968                         parent = q->handle;
2969                 } else {
2970                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2971                 }
2972                 if (!q)
2973                         goto out;
2974                 cops = q->ops->cl_ops;
2975                 if (!cops)
2976                         goto out;
2977                 if (!cops->tcf_block)
2978                         goto out;
2979                 if (TC_H_MIN(tcm->tcm_parent)) {
2980                         cl = cops->find(q, tcm->tcm_parent);
2981                         if (cl == 0)
2982                                 goto out;
2983                 }
2984                 block = cops->tcf_block(q, cl, NULL);
2985                 if (!block)
2986                         goto out;
2987                 if (tcf_block_shared(block))
2988                         q = NULL;
2989         }
2990
2991         index_start = cb->args[0];
2992         index = 0;
2993
2994         mutex_lock(&block->lock);
2995         list_for_each_entry(chain, &block->chain_list, list) {
2996                 if ((tca[TCA_CHAIN] &&
2997                      nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2998                         continue;
2999                 if (index < index_start) {
3000                         index++;
3001                         continue;
3002                 }
3003                 if (tcf_chain_held_by_acts_only(chain))
3004                         continue;
3005                 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3006                                          chain->index, net, skb, block,
3007                                          NETLINK_CB(cb->skb).portid,
3008                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
3009                                          RTM_NEWCHAIN);
3010                 if (err <= 0)
3011                         break;
3012                 index++;
3013         }
3014         mutex_unlock(&block->lock);
3015
3016         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3017                 tcf_block_refcnt_put(block, true);
3018         cb->args[0] = index;
3019
3020 out:
3021         /* If we made no progress, the error (EMSGSIZE) is real */
3022         if (skb->len == 0 && err)
3023                 return err;
3024         return skb->len;
3025 }
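/* The resume protocol used by tc_dump_chain() above, reduced to its
 * essentials: a netlink dumper skips the cb->args[0] entries it already
 * sent, emits until the fill function reports the skb is full, and records
 * how far it got so the next invocation continues from there. This is an
 * illustrative sketch only; example_fill() is a hypothetical stand-in for
 * a fill routine such as tc_chain_fill_node().
 */
static int example_fill(struct sk_buff *skb, long index)
{
        return nla_put_u32(skb, TCA_CHAIN, index) ? -EMSGSIZE : 1;
}

static int example_dump_resume(struct sk_buff *skb,
                               struct netlink_callback *cb, long nr_entries)
{
        long index_start = cb->args[0];
        long index;

        for (index = 0; index < nr_entries; index++) {
                if (index < index_start)
                        continue;       /* already sent in a prior pass */
                if (example_fill(skb, index) <= 0)
                        break;          /* skb full; resume here next time */
        }
        cb->args[0] = index;            /* resume point for the next call */
        return skb->len;
}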
3026
3027 void tcf_exts_destroy(struct tcf_exts *exts)
3028 {
3029 #ifdef CONFIG_NET_CLS_ACT
3030         tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3031         kfree(exts->actions);
3032         exts->nr_actions = 0;
3033 #endif
3034 }
3035 EXPORT_SYMBOL(tcf_exts_destroy);
3036
3037 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3038                       struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3039                       bool rtnl_held, struct netlink_ext_ack *extack)
3040 {
3041 #ifdef CONFIG_NET_CLS_ACT
3042         {
3043                 struct tc_action *act;
3044                 size_t attr_size = 0;
3045
3046                 if (exts->police && tb[exts->police]) {
3047                         act = tcf_action_init_1(net, tp, tb[exts->police],
3048                                                 rate_tlv, "police", ovr,
3049                                                 TCA_ACT_BIND, rtnl_held,
3050                                                 extack);
3051                         if (IS_ERR(act))
3052                                 return PTR_ERR(act);
3053
3054                         act->type = exts->type = TCA_OLD_COMPAT;
3055                         exts->actions[0] = act;
3056                         exts->nr_actions = 1;
3057                 } else if (exts->action && tb[exts->action]) {
3058                         int err;
3059
3060                         err = tcf_action_init(net, tp, tb[exts->action],
3061                                               rate_tlv, NULL, ovr, TCA_ACT_BIND,
3062                                               exts->actions, &attr_size,
3063                                               rtnl_held, extack);
3064                         if (err < 0)
3065                                 return err;
3066                         exts->nr_actions = err;
3067                 }
3068         }
3069 #else
3070         if ((exts->action && tb[exts->action]) ||
3071             (exts->police && tb[exts->police])) {
3072                 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3073                 return -EOPNOTSUPP;
3074         }
3075 #endif
3076
3077         return 0;
3078 }
3079 EXPORT_SYMBOL(tcf_exts_validate);
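/* A minimal sketch of the call sequence a classifier's ->change() path
 * typically wraps around tcf_exts_validate(): init, validate, destroy on
 * error. TCA_EXAMPLE_ACT, TCA_EXAMPLE_POLICE and struct example_filter
 * are hypothetical stand-ins for a real classifier's netlink attributes
 * and per-filter state, not part of this file.
 */
enum {
        TCA_EXAMPLE_UNSPEC,
        TCA_EXAMPLE_ACT,
        TCA_EXAMPLE_POLICE,
        __TCA_EXAMPLE_MAX,
};

struct example_filter {
        struct tcf_exts exts;
};

static int example_set_parms(struct net *net, struct tcf_proto *tp,
                             struct example_filter *f, struct nlattr **tb,
                             struct nlattr *est, bool ovr, bool rtnl_held,
                             struct netlink_ext_ack *extack)
{
        int err;

        err = tcf_exts_init(&f->exts, TCA_EXAMPLE_ACT, TCA_EXAMPLE_POLICE);
        if (err < 0)
                return err;

        /* Parses tb[TCA_EXAMPLE_ACT] / tb[TCA_EXAMPLE_POLICE] and binds
         * the resulting actions to this filter.
         */
        err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
                                extack);
        if (err < 0)
                tcf_exts_destroy(&f->exts);
        return err;
}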
3080
3081 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3082 {
3083 #ifdef CONFIG_NET_CLS_ACT
3084         struct tcf_exts old = *dst;
3085
3086         *dst = *src;
3087         tcf_exts_destroy(&old);
3088 #endif
3089 }
3090 EXPORT_SYMBOL(tcf_exts_change);
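/* Sketch of the update path that pairs with tcf_exts_change(): validate
 * the new actions into a temporary tcf_exts, then swap it in; the old
 * action set is released inside tcf_exts_change(). Reuses the hypothetical
 * names from the sketch above.
 */
static int example_update_actions(struct net *net, struct tcf_proto *tp,
                                  struct example_filter *f, struct nlattr **tb,
                                  struct nlattr *est, bool ovr, bool rtnl_held,
                                  struct netlink_ext_ack *extack)
{
        struct tcf_exts e;
        int err;

        err = tcf_exts_init(&e, TCA_EXAMPLE_ACT, TCA_EXAMPLE_POLICE);
        if (err < 0)
                return err;
        err = tcf_exts_validate(net, tp, tb, est, &e, ovr, rtnl_held, extack);
        if (err < 0) {
                tcf_exts_destroy(&e);
                return err;
        }
        tcf_exts_change(&f->exts, &e);  /* frees the old action set */
        return 0;
}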
3091
3092 #ifdef CONFIG_NET_CLS_ACT
3093 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3094 {
3095         if (exts->nr_actions == 0)
3096                 return NULL;
3097         else
3098                 return exts->actions[0];
3099 }
3100 #endif
3101
3102 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3103 {
3104 #ifdef CONFIG_NET_CLS_ACT
3105         struct nlattr *nest;
3106
3107         if (exts->action && tcf_exts_has_actions(exts)) {
3108                 /*
3109                  * Again, for backward-compatible mode: we want
3110                  * to work with both old and new modes of entering
3111                  * tc data even if iproute2 was newer - jhs
3112                  */
3113                 if (exts->type != TCA_OLD_COMPAT) {
3114                         nest = nla_nest_start_noflag(skb, exts->action);
3115                         if (nest == NULL)
3116                                 goto nla_put_failure;
3117
3118                         if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
3119                                 goto nla_put_failure;
3120                         nla_nest_end(skb, nest);
3121                 } else if (exts->police) {
3122                         struct tc_action *act = tcf_exts_first_act(exts);
3123                         nest = nla_nest_start_noflag(skb, exts->police);
3124                         if (nest == NULL || !act)
3125                                 goto nla_put_failure;
3126                         if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3127                                 goto nla_put_failure;
3128                         nla_nest_end(skb, nest);
3129                 }
3130         }
3131         return 0;
3132
3133 nla_put_failure:
3134         nla_nest_cancel(skb, nest);
3135         return -1;
3136 #else
3137         return 0;
3138 #endif
3139 }
3140 EXPORT_SYMBOL(tcf_exts_dump);
3141
3143 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3144 {
3145 #ifdef CONFIG_NET_CLS_ACT
3146         struct tc_action *a = tcf_exts_first_act(exts);
3147         if (a && tcf_action_copy_stats(skb, a, 1) < 0)
3148                 return -1;
3149 #endif
3150         return 0;
3151 }
3152 EXPORT_SYMBOL(tcf_exts_dump_stats);
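/* Sketch of a classifier's ->dump() using the two helpers above: the
 * actions are dumped inside the TCA_OPTIONS nest, while the action stats
 * are emitted as a sibling attribute after the nest is closed (this
 * mirrors what in-tree classifiers such as cls_fw do). example_filter is
 * the hypothetical struct from the earlier sketches, and the signature
 * follows the ->dump hook of struct tcf_proto_ops in this tree.
 */
static int example_dump(struct net *net, struct tcf_proto *tp, void *fh,
                        struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
        struct example_filter *f = fh;
        struct nlattr *nest;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;
        if (tcf_exts_dump(skb, &f->exts) < 0)
                goto nla_put_failure;
        nla_nest_end(skb, nest);
        if (tcf_exts_dump_stats(skb, &f->exts) < 0)
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}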
3153
3154 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3155                      void *type_data, bool err_stop)
3156 {
3157         struct flow_block_cb *block_cb;
3158         int ok_count = 0;
3159         int err;
3160
3161         /* Make sure all netdevs sharing this block are offload-capable. */
3162         if (block->nooffloaddevcnt && err_stop)
3163                 return -EOPNOTSUPP;
3164
3165         list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3166                 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3167                 if (err) {
3168                         if (err_stop)
3169                                 return err;
3170                 } else {
3171                         ok_count++;
3172                 }
3173         }
3174         return ok_count;
3175 }
3176 EXPORT_SYMBOL(tc_setup_cb_call);
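/* A condensed sketch of how a classifier drives the block callbacks via
 * tc_setup_cb_call() when offloading a rule, loosely modelled on
 * cls_flower's FLOW_CLS_REPLACE path; the caller is assumed to have
 * filled *cls beforehand. err_stop (skip_sw here) makes the first
 * callback failure fatal, while a positive return counts the callbacks
 * that accepted the rule. The function name is hypothetical.
 */
static int example_offload_replace(struct tcf_block *block,
                                   struct flow_cls_offload *cls, bool skip_sw)
{
        int err;

        cls->command = FLOW_CLS_REPLACE;
        err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, cls, skip_sw);
        if (err < 0)
                return err;     /* a callback failed with err_stop set */
        if (err == 0 && skip_sw)
                return -EINVAL; /* hw-only requested but nothing offloaded */
        return 0;               /* err > 0: offloaded by that many callbacks */
}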
3177
3178 int tc_setup_flow_action(struct flow_action *flow_action,
3179                          const struct tcf_exts *exts)
3180 {
3181         const struct tc_action *act;
3182         int i, j, k;
3183
3184         if (!exts)
3185                 return 0;
3186
3187         j = 0;
3188         tcf_exts_for_each_action(i, act, exts) {
3189                 struct flow_action_entry *entry;
3190
3191                 entry = &flow_action->entries[j];
3192                 if (is_tcf_gact_ok(act)) {
3193                         entry->id = FLOW_ACTION_ACCEPT;
3194                 } else if (is_tcf_gact_shot(act)) {
3195                         entry->id = FLOW_ACTION_DROP;
3196                 } else if (is_tcf_gact_trap(act)) {
3197                         entry->id = FLOW_ACTION_TRAP;
3198                 } else if (is_tcf_gact_goto_chain(act)) {
3199                         entry->id = FLOW_ACTION_GOTO;
3200                         entry->chain_index = tcf_gact_goto_chain_index(act);
3201                 } else if (is_tcf_mirred_egress_redirect(act)) {
3202                         entry->id = FLOW_ACTION_REDIRECT;
3203                         entry->dev = tcf_mirred_dev(act);
3204                 } else if (is_tcf_mirred_egress_mirror(act)) {
3205                         entry->id = FLOW_ACTION_MIRRED;
3206                         entry->dev = tcf_mirred_dev(act);
3207                 } else if (is_tcf_vlan(act)) {
3208                         switch (tcf_vlan_action(act)) {
3209                         case TCA_VLAN_ACT_PUSH:
3210                                 entry->id = FLOW_ACTION_VLAN_PUSH;
3211                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3212                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3213                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3214                                 break;
3215                         case TCA_VLAN_ACT_POP:
3216                                 entry->id = FLOW_ACTION_VLAN_POP;
3217                                 break;
3218                         case TCA_VLAN_ACT_MODIFY:
3219                                 entry->id = FLOW_ACTION_VLAN_MANGLE;
3220                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3221                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3222                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3223                                 break;
3224                         default:
3225                                 goto err_out;
3226                         }
3227                 } else if (is_tcf_tunnel_set(act)) {
3228                         entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3229                         entry->tunnel = tcf_tunnel_info(act);
3230                 } else if (is_tcf_tunnel_release(act)) {
3231                         entry->id = FLOW_ACTION_TUNNEL_DECAP;
3232                 } else if (is_tcf_pedit(act)) {
3233                         for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3234                                 switch (tcf_pedit_cmd(act, k)) {
3235                                 case TCA_PEDIT_KEY_EX_CMD_SET:
3236                                         entry->id = FLOW_ACTION_MANGLE;
3237                                         break;
3238                                 case TCA_PEDIT_KEY_EX_CMD_ADD:
3239                                         entry->id = FLOW_ACTION_ADD;
3240                                         break;
3241                                 default:
3242                                         goto err_out;
3243                                 }
3244                                 entry->mangle.htype = tcf_pedit_htype(act, k);
3245                                 entry->mangle.mask = tcf_pedit_mask(act, k);
3246                                 entry->mangle.val = tcf_pedit_val(act, k);
3247                                 entry->mangle.offset = tcf_pedit_offset(act, k);
3248                                 entry = &flow_action->entries[++j];
3249                         }
3250                 } else if (is_tcf_csum(act)) {
3251                         entry->id = FLOW_ACTION_CSUM;
3252                         entry->csum_flags = tcf_csum_update_flags(act);
3253                 } else if (is_tcf_skbedit_mark(act)) {
3254                         entry->id = FLOW_ACTION_MARK;
3255                         entry->mark = tcf_skbedit_mark(act);
3256                 } else if (is_tcf_sample(act)) {
3257                         entry->id = FLOW_ACTION_SAMPLE;
3258                         entry->sample.psample_group =
3259                                 tcf_sample_psample_group(act);
3260                         entry->sample.trunc_size = tcf_sample_trunc_size(act);
3261                         entry->sample.truncate = tcf_sample_truncate(act);
3262                         entry->sample.rate = tcf_sample_rate(act);
3263                 } else if (is_tcf_police(act)) {
3264                         entry->id = FLOW_ACTION_POLICE;
3265                         entry->police.burst = tcf_police_tcfp_burst(act);
3266                         entry->police.rate_bytes_ps =
3267                                 tcf_police_rate_bytes_ps(act);
3268                 } else if (is_tcf_ct(act)) {
3269                         entry->id = FLOW_ACTION_CT;
3270                         entry->ct.action = tcf_ct_action(act);
3271                         entry->ct.zone = tcf_ct_zone(act);
3272                 } else {
3273                         goto err_out;
3274                 }
3275
3276                 if (!is_tcf_pedit(act))
3277                         j++;    /* pedit already advanced j once per key */
3278         }
3279         return 0;
3280 err_out:
3281         return -EOPNOTSUPP;
3282 }
3283 EXPORT_SYMBOL(tc_setup_flow_action);
3284
3285 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3286 {
3287         unsigned int num_acts = 0;
3288         struct tc_action *act;
3289         int i;
3290
3291         tcf_exts_for_each_action(i, act, exts) {
3292                 if (is_tcf_pedit(act))
3293                         num_acts += tcf_pedit_nkeys(act);
3294                 else
3295                         num_acts++;
3296         }
3297         return num_acts;
3298 }
3299 EXPORT_SYMBOL(tcf_exts_num_actions);
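/* Sketch of the driver-facing pairing of the two helpers above:
 * tcf_exts_num_actions() sizes the flow_action array (pedit contributes
 * one entry per key, matching the expansion in tc_setup_flow_action()),
 * and tc_setup_flow_action() then translates the actions into it.
 * cls_flower follows this pattern when building a struct flow_rule for
 * offload; the function name here is hypothetical.
 */
static struct flow_rule *example_build_flow_rule(struct tcf_exts *exts)
{
        struct flow_rule *rule;
        int err;

        rule = flow_rule_alloc(tcf_exts_num_actions(exts));
        if (!rule)
                return ERR_PTR(-ENOMEM);

        err = tc_setup_flow_action(&rule->action, exts);
        if (err) {
                kfree(rule);
                return ERR_PTR(err);
        }
        return rule;
}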
3300
3301 static __net_init int tcf_net_init(struct net *net)
3302 {
3303         struct tcf_net *tn = net_generic(net, tcf_net_id);
3304
3305         spin_lock_init(&tn->idr_lock);
3306         idr_init(&tn->idr);
3307         return 0;
3308 }
3309
3310 static void __net_exit tcf_net_exit(struct net *net)
3311 {
3312         struct tcf_net *tn = net_generic(net, tcf_net_id);
3313
3314         idr_destroy(&tn->idr);
3315 }
3316
3317 static struct pernet_operations tcf_net_ops = {
3318         .init = tcf_net_init,
3319         .exit = tcf_net_exit,
3320         .id   = &tcf_net_id,
3321         .size = sizeof(struct tcf_net),
3322 };
3323
3324 static int __init tc_filter_init(void)
3325 {
3326         int err;
3327
3328         tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3329         if (!tc_filter_wq)
3330                 return -ENOMEM;
3331
3332         err = register_pernet_subsys(&tcf_net_ops);
3333         if (err)
3334                 goto err_register_pernet_subsys;
3335
3336         err = rhashtable_init(&indr_setup_block_ht,
3337                               &tc_indr_setup_block_ht_params);
3338         if (err)
3339                 goto err_rhash_setup_block_ht;
3340
3341         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3342                       RTNL_FLAG_DOIT_UNLOCKED);
3343         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3344                       RTNL_FLAG_DOIT_UNLOCKED);
3345         rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3346                       tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3347         rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3348         rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3349         rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3350                       tc_dump_chain, 0);
3351
3352         return 0;
3353
3354 err_rhash_setup_block_ht:
3355         unregister_pernet_subsys(&tcf_net_ops);
3356 err_register_pernet_subsys:
3357         destroy_workqueue(tc_filter_wq);
3358         return err;
3359 }
3360
3361 subsys_initcall(tc_filter_init);