/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */

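/*
 * Worked example (added for clarity, not in the original source): with
 * mask 0x00f0 and shift 4, a tc_index of 0x0093 yields the key
 * (0x0093 & 0x00f0) >> 4 = 9, and the largest possible key is
 * mask >> shift = 15, comfortably below PERFECT_HASH_THRESHOLD.
 */
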
struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	struct rcu_work		rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};

struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	struct rcu_work rwork;
};

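/*
 * Layout sketch (added for clarity): with a perfect hash, ->perfect is
 * an array indexed directly by key, one preallocated slot per possible
 * (masked and shifted) tc_index value.  With an imperfect hash, ->h is
 * an array of ->hash bucket heads, each a singly linked RCU list of
 * tcindex_filter entries matched on their ->key field.
 */
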
static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}

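/*
 * Usage sketch (illustrative; the exact tc invocation is an assumption,
 * not taken from this file): tcindex is typically paired with a qdisc
 * such as dsmark that stores the DS field in skb->tc_index, e.g.
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 1 handle 0x2e \
 *      tcindex mask 0xfc shift 2 classid 1:1 pass_on
 *
 * so a packet whose masked and shifted tc_index equals 0x2e (the EF
 * DSCP) is mapped to class 1:1.
 */
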
static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	rcu_assign_pointer(tp->root, p);
	return 0;
}

static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);

	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;
		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

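/*
 * Example (added for clarity): mask 0x001f with shift 0 gives a maximum
 * key of 31, so any hash of at least 32 slots passes this check; a hash
 * of 16 fails because keys 16..31 would index past the end of the
 * perfect array.
 */
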
static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	kfree(p->perfect);
	kfree(p);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		for (i = 0; i < cp->hash; i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, net);
	if (err < 0)
		goto errout1;
	if (old_r)
		cr = r->res;

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}
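	/* Example (added for clarity): mask 0x000f, shift 0 bounds the
	 * key at 15, below PERFECT_HASH_THRESHOLD, so cp->hash becomes
	 * 16 and a perfect hash is used; mask 0xffff, shift 0 exceeds
	 * the threshold and falls back to DEFAULT_HASH_SIZE chained
	 * buckets.
	 */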

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;
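	/* Example (added for clarity): with mask 0x00f0 and shift 4,
	 * alloc_hash is 16, so handles 0..15 pass and handle 16 is
	 * rejected even though a later mask change could make it valid;
	 * the stricter (handle & ~(mask >> shift)) form would also
	 * reject in-range handles such as 2 for a sparse bound like 5.
	 */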

	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
errout1:
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr,
	       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg %p\n",
		 tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);

	if (!opt)
		return 0;

	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
					  tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr, extack);
}

static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
			 bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}

static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid)
		r->res.class = cl;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.bind_class	=	tcindex_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");