/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
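/* Illustrative only (not part of the original file): filters handled by
 * this classifier are typically installed from user space with tc(8),
 * e.g.
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 u32 \
 *		match ip dst 192.168.0.0/24 flowid 1:1
 *
 * which becomes a single key node (one 32bit value/mask pair compared
 * against the word holding the IP destination address) in the root hash
 * table; "eth0" and the class ids above are placeholder values.
 */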
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/netdevice.h>
struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct tcf_proto	*tp;
	struct rcu_head		rcu;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct rcu_head		rcu;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[1];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
	struct rcu_head		rcu;
};
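/* Handle layout refresher (derived from the TC_U32_* macros in
 * <linux/pkt_cls.h>; the sample values below are illustrative only):
 * a 32bit handle packs the hash table id into bits 20-31, the bucket
 * into bits 12-19 and the key node id into bits 0-11. So for handle
 * 0x80012345:
 *
 *	TC_U32_HTID(0x80012345) == 0x80000000	hash table
 *	TC_U32_HASH(0x80012345) == 0x12		bucket in that table
 *	TC_U32_NODE(0x80012345) == 0x345	key node in that bucket
 */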
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
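/* Worked example (illustrative values): with a host-order hash mask of
 * 0x00ff0000, the fold shift computed in u32_change() is
 * ffs(ntohl(hmask)) - 1 == 16, so a key whose masked host-order value is
 * 0x00a50000 folds to 0xa5; u32_classify() then clips the result with
 * the hash table's divisor to pick the bucket.
 */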
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto deadloop;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto deadloop;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}
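/* Note (summary, not from the original file): the explicit stack above
 * bounds the walk to TC_U32_MAXDEPTH (8 in <linux/pkt_cls.h>) chained
 * hash tables; anything deeper is treated as a configuration loop and
 * hits the rate-limited "dead loop" warning instead of recursing
 * forever.
 */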
static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}
static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}
static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	/* hgenerator is only used inside the rtnl lock, so it is safe
	 * to increment without read _copy_ update semantics.
	 */
	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
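/* Worked example (illustrative): on a fresh tc_u_common the first call
 * increments hgenerator to 1 and returns (1 | 0x800) << 20 == 0x80100000,
 * so automatically generated table ids always have the 0x800 bit set,
 * keeping them apart from low user-chosen table ids.
 */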
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);
	root_ht->tp_c = tp_c;

	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}
static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
			   bool free_pf)
{
	tcf_exts_destroy(&n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	kfree(n);
	return 0;
}
/* u32_delete_key_rcu should be called when free'ing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	u32_destroy_key(key->tp, key, false);
}
/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that free's the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	u32_destroy_key(key->tp, key, true);
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);

				tcf_unbind_filter(tp, &key->res);
				call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}
static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	if (tc_should_offload(dev, tp, 0)) {
		offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
		offload.cls_u32->knode.handle = handle;
		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					      tp->protocol, &offload);
	}
}
static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;
	int err;

	if (!tc_should_offload(dev, tp, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
	offload.cls_u32->hnode.divisor = h->divisor;
	offload.cls_u32->hnode.handle = h->handle;
	offload.cls_u32->hnode.prio = h->prio;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					    tp->protocol, &offload);
	if (tc_skip_sw(flags))
		return err;

	return 0;
}
static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	if (tc_should_offload(dev, tp, 0)) {
		offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
		offload.cls_u32->hnode.divisor = h->divisor;
		offload.cls_u32->hnode.handle = h->handle;
		offload.cls_u32->hnode.prio = h->prio;

		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					      tp->protocol, &offload);
	}
}
static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;
	int err;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	if (!tc_should_offload(dev, tp, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

	offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
	offload.cls_u32->knode.handle = n->handle;
	offload.cls_u32->knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	offload.cls_u32->knode.val = n->val;
	offload.cls_u32->knode.mask = n->mask;
#else
	offload.cls_u32->knode.val = 0;
	offload.cls_u32->knode.mask = 0;
#endif
	offload.cls_u32->knode.sel = &n->sel;
	offload.cls_u32->knode.exts = &n->exts;
	if (n->ht_down)
		offload.cls_u32->knode.link_handle = n->ht_down->handle;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					    tp->protocol, &offload);
	if (tc_skip_sw(flags))
		return err;

	return 0;
}
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n->handle);
			call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
		}
	}
}
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}
static bool ht_empty(struct tc_u_hnode *ht)
{
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++)
		if (rcu_access_pointer(ht->ht[h]))
			return false;

	return true;
}
static bool u32_destroy(struct tcf_proto *tp, bool force)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (!force) {
		if (root_ht) {
			if (root_ht->refcnt > 1)
				return false;
			if (root_ht->refcnt == 1) {
				if (!ht_empty(root_ht))
					return false;
			}
		}

		if (tp_c->refcnt > 1)
			return false;

		if (tp_c->refcnt == 1) {
			struct tc_u_hnode *ht;

			for (ht = rtnl_dereference(tp_c->hlist);
			     ht;
			     ht = rtnl_dereference(ht->next))
				if (!ht_empty(ht))
					return false;
		}
	}

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next)) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			RCU_INIT_POINTER(tp_c->hlist, ht->next);
			kfree_rcu(ht, rcu);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
	return true;
}
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, ht->handle);
		return u32_delete_key(tp, (struct tc_u_knode *)ht);
	}

	if (root_ht == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}
#define NR_U32_NODE (1<<12)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned long i;
	unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
					GFP_KERNEL);
	if (!bitmap)
		return handle | 0xFFF;

	for (n = rtnl_dereference(ht->ht[TC_U32_HASH(handle)]);
	     n;
	     n = rtnl_dereference(n->next))
		set_bit(TC_U32_NODE(n->handle), bitmap);

	i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
	if (i >= NR_U32_NODE)
		i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);

	kfree(bitmap);
	return handle | (i >= NR_U32_NODE ? 0xFFF : i);
}
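/* Allocation policy sketch (summary of the code above): automatic node
 * ids are searched from 0x800 upward first, leaving 1-0x7FF untouched
 * for explicitly requested ids, and the search only wraps back to 1
 * when the upper range is full; 0xFFF is the fallback when the bitmap
 * cannot be allocated or every id in the bucket is taken.
 */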
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
		if (ret < 0)
			goto errout;
		n->ifindex = ret;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is
	 * not the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}
static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_knode *new;
	struct tc_u32_sel *s = &n->sel;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);

	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

#ifdef CONFIG_NET_CLS_IND
	new->ifindex = n->ifindex;
#endif
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, n->ht_down);

	/* bump reference count as long as we hold pointer to structure */
	if (new->ht_down)
		new->ht_down->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	new->tp = tp;
	memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));

	if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	return new;
}
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, unsigned long *arg, bool ovr)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, flags = 0;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	n = (struct tc_u_knode *)*arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		if (n->flags != flags)
			return -EINVAL;

		new = u32_init_knode(tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base,
				    rtnl_dereference(n->ht_up), new, tb,
				    tca[TCA_RATE], ovr);

		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags);
		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		call_rcu(&n->rcu, u32_delete_key_rcu);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;

		err = u32_replace_hw_hnode(tp, ht, flags);
		if (err) {
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = (unsigned long)ht;

		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = flags;
	n->tp = tp;

	err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags);
		if (err)
			goto errhw;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		*arg = (unsigned long)n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
	free_percpu(n->pf);
#endif
	kfree(n);
	return err;
}
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}
static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode *)fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;
			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT,
				  sizeof(struct tc_u32_pcnt) +
				  n->sel.nkeys * sizeof(u64),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");