/*
 * net/sched/cls_tcindex.c      Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD  64      /* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE       64      /* optimized for diffserv */
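
/*
 * Worked example of how these constants are used (see the sizing logic in
 * tcindex_set_parms() below): the largest possible key is (mask >> shift).
 * With mask 0x3f and shift 0 the maximum key is 63, which is below
 * PERFECT_HASH_THRESHOLD, so a perfect hash of 64 slots is allocated and
 * lookups become direct array indexing.  With the default mask 0xffff the
 * maximum key is 65535, so the chained (imperfect) hash of DEFAULT_HASH_SIZE
 * buckets is used instead.
 */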


struct tcindex_filter_result {
        struct tcf_exts         exts;
        struct tcf_result       res;
        struct rcu_work         rwork;
};

struct tcindex_filter {
        u16 key;
        struct tcindex_filter_result result;
        struct tcindex_filter __rcu *next;
        struct rcu_work rwork;
};


struct tcindex_data {
        struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
        struct tcindex_filter __rcu **h; /* imperfect hash; */
        struct tcf_proto *tp;
        u16 mask;               /* AND key with mask */
        u32 shift;              /* shift ANDed key to the right */
        u32 hash;               /* hash table size; 0 if undefined */
        u32 alloc_hash;         /* allocated size */
        u32 fall_through;       /* 0: only classify if explicit match */
        struct rcu_work rwork;
};
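
/*
 * Illustration of the key derivation performed in tcindex_classify() below:
 *
 *      key = (skb->tc_index & mask) >> shift;
 *
 * e.g. with mask 0xfc and shift 2, a tc_index of 0x2e yields
 * (0x2e & 0xfc) >> 2 = 0x0b; that key selects either a slot in the perfect
 * hash or a bucket (key % hash) in the chained hash.
 */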

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
        return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
                                                    u16 key)
{
        if (p->perfect) {
                struct tcindex_filter_result *f = p->perfect + key;

                return tcindex_filter_is_set(f) ? f : NULL;
        } else if (p->h) {
                struct tcindex_filter __rcu **fp;
                struct tcindex_filter *f;

                fp = &p->h[key % p->hash];
                for (f = rcu_dereference_bh_rtnl(*fp);
                     f;
                     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
                        if (f->key == key)
                                return &f->result;
        }

        return NULL;
}


static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct tcindex_data *p = rcu_dereference_bh(tp->root);
        struct tcindex_filter_result *f;
        int key = (skb->tc_index & p->mask) >> p->shift;

        pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
                 skb, tp, res, p);

        f = tcindex_lookup(p, key);
        if (!f) {
                struct Qdisc *q = tcf_block_q(tp->chain->block);

                if (!p->fall_through)
                        return -1;
                res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
                res->class = 0;
                pr_debug("alg 0x%x\n", res->classid);
                return 0;
        }
        *res = f->res;
        pr_debug("map 0x%x\n", res->classid);

        return tcf_exts_exec(skb, &f->exts, res);
}
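
/*
 * Worked example of the fall_through path above: if no filter matches and
 * fall_through is enabled, the class is synthesized from the qdisc's major
 * number and the key.  Under a qdisc with handle 1: and a key of 5, the
 * packet is mapped to class 1:5 even though no filter was configured for
 * that key.
 */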


static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
        struct tcindex_data *p = rtnl_dereference(tp->root);
        struct tcindex_filter_result *r;

        pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
        if (p->perfect && handle >= p->alloc_hash)
                return NULL;
        r = tcindex_lookup(p, handle);
        return r && tcindex_filter_is_set(r) ? r : NULL;
}
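
/*
 * Note that for this classifier the netlink filter handle doubles as the
 * lookup key: tcindex_get() feeds the handle straight into tcindex_lookup(),
 * so a handle of, say, 11 addresses the entry for key 11.
 */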

static int tcindex_init(struct tcf_proto *tp)
{
        struct tcindex_data *p;

        pr_debug("tcindex_init(tp %p)\n", tp);
        p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        p->mask = 0xffff;
        p->hash = DEFAULT_HASH_SIZE;
        p->fall_through = 1;

        rcu_assign_pointer(tp->root, p);
        return 0;
}

static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
        tcf_exts_destroy(&r->exts);
        tcf_exts_put_net(&r->exts);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
        struct tcindex_filter_result *r;

        r = container_of(to_rcu_work(work),
                         struct tcindex_filter_result,
                         rwork);
        rtnl_lock();
        __tcindex_destroy_rexts(r);
        rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
        tcf_exts_destroy(&f->result.exts);
        tcf_exts_put_net(&f->result.exts);
        kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
        struct tcindex_filter *f = container_of(to_rcu_work(work),
                                                struct tcindex_filter,
                                                rwork);

        rtnl_lock();
        __tcindex_destroy_fexts(f);
        rtnl_unlock();
}

static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
                          bool rtnl_held, struct netlink_ext_ack *extack)
{
        struct tcindex_data *p = rtnl_dereference(tp->root);
        struct tcindex_filter_result *r = arg;
        struct tcindex_filter __rcu **walk;
        struct tcindex_filter *f = NULL;

        pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
        if (p->perfect) {
                if (!r->res.class)
                        return -ENOENT;
        } else {
                int i;

                for (i = 0; i < p->hash; i++) {
                        walk = p->h + i;
                        for (f = rtnl_dereference(*walk); f;
                             walk = &f->next, f = rtnl_dereference(*walk)) {
                                if (&f->result == r)
                                        goto found;
                        }
                }
                return -ENOENT;

found:
                rcu_assign_pointer(*walk, rtnl_dereference(f->next));
        }
        tcf_unbind_filter(tp, &r->res);
        /* all classifiers are required to call tcf_exts_destroy() after rcu
         * grace period, since converted-to-rcu actions are relying on that
         * in cleanup() callback
         */
        if (f) {
                if (tcf_exts_get_net(&f->result.exts))
                        tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
                else
                        __tcindex_destroy_fexts(f);
        } else {
                if (tcf_exts_get_net(&r->exts))
                        tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
                else
                        __tcindex_destroy_rexts(r);
        }

        *last = false;
        return 0;
}

static void tcindex_destroy_work(struct work_struct *work)
{
        struct tcindex_data *p = container_of(to_rcu_work(work),
                                              struct tcindex_data,
                                              rwork);

        kfree(p->perfect);
        kfree(p->h);
        kfree(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
        return  p->hash > (p->mask >> p->shift);
}
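
/*
 * In other words, a perfect hash is only considered valid when the table has
 * a slot for every reachable key, i.e. hash exceeds (mask >> shift).  For a
 * mask of 0x0f and shift 0, any hash size of 16 or more qualifies.
 */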

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
        [TCA_TCINDEX_HASH]              = { .type = NLA_U32 },
        [TCA_TCINDEX_MASK]              = { .type = NLA_U16 },
        [TCA_TCINDEX_SHIFT]             = { .type = NLA_U32 },
        [TCA_TCINDEX_FALL_THROUGH]      = { .type = NLA_U32 },
        [TCA_TCINDEX_CLASSID]           = { .type = NLA_U32 },
};
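
/*
 * These attributes correspond to the tcindex options exposed by iproute2's
 * tc(8).  As a rough, illustrative example only (exact syntax may vary by
 * iproute2 version), a DSCP-style setup could look like:
 *
 *      tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *              tcindex mask 0xfc shift 2 hash 64 pass_on
 *      tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *              handle 11 tcindex classid 1:11
 *
 * The first command would carry TCA_TCINDEX_MASK, TCA_TCINDEX_SHIFT,
 * TCA_TCINDEX_HASH and TCA_TCINDEX_FALL_THROUGH; the second would carry
 * TCA_TCINDEX_CLASSID for key 11.
 */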

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
                                      struct net *net)
{
        memset(r, 0, sizeof(*r));
        return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
                             TCA_TCINDEX_POLICE);
}

static void tcindex_partial_destroy_work(struct work_struct *work)
{
        struct tcindex_data *p = container_of(to_rcu_work(work),
                                              struct tcindex_data,
                                              rwork);

        kfree(p->perfect);
        kfree(p);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
        int i;

        for (i = 0; i < cp->hash; i++)
                tcf_exts_destroy(&cp->perfect[i].exts);
        kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
        int i, err = 0;

        cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
                              GFP_KERNEL);
        if (!cp->perfect)
                return -ENOMEM;

        for (i = 0; i < cp->hash; i++) {
                err = tcf_exts_init(&cp->perfect[i].exts, net,
                                    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
                if (err < 0)
                        goto errout;
        }

        return 0;

errout:
        tcindex_free_perfect_hash(cp);
        return err;
}

static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                  u32 handle, struct tcindex_data *p,
                  struct tcindex_filter_result *r, struct nlattr **tb,
                  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
        struct tcindex_filter_result new_filter_result, *old_r = r;
        struct tcindex_data *cp = NULL, *oldp;
        struct tcindex_filter *f = NULL; /* make gcc behave */
        struct tcf_result cr = {};
        int err, balloc = 0;
        struct tcf_exts e;

        err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
        if (err < 0)
                return err;
        err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
        if (err < 0)
                goto errout;

        err = -ENOMEM;
        /* tcindex_data attributes must look atomic to classifier/lookup so
         * allocate new tcindex data and RCU assign it onto root. Keeping
         * perfect hash and hash pointers from old data.
         */
        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
        if (!cp)
                goto errout;

        cp->mask = p->mask;
        cp->shift = p->shift;
        cp->hash = p->hash;
        cp->alloc_hash = p->alloc_hash;
        cp->fall_through = p->fall_through;
        cp->tp = tp;

        if (p->perfect) {
                int i;

                if (tcindex_alloc_perfect_hash(net, cp) < 0)
                        goto errout;
                for (i = 0; i < cp->hash; i++)
                        cp->perfect[i].res = p->perfect[i].res;
                balloc = 1;
        }
        cp->h = p->h;

        err = tcindex_filter_result_init(&new_filter_result, net);
        if (err < 0)
                goto errout1;
        if (old_r)
                cr = r->res;

        if (tb[TCA_TCINDEX_HASH])
                cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

        if (tb[TCA_TCINDEX_MASK])
                cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

        if (tb[TCA_TCINDEX_SHIFT])
                cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

        err = -EBUSY;

        /* Hash already allocated, make sure that we still meet the
         * requirements for the allocated hash.
         */
        if (cp->perfect) {
                if (!valid_perfect_hash(cp) ||
                    cp->hash > cp->alloc_hash)
                        goto errout_alloc;
        } else if (cp->h && cp->hash != cp->alloc_hash) {
                goto errout_alloc;
        }

        err = -EINVAL;
        if (tb[TCA_TCINDEX_FALL_THROUGH])
                cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

        if (!cp->hash) {
                /* Hash not specified, use perfect hash if the upper limit
                 * of the hashing index is below the threshold.
                 */
                if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
                        cp->hash = (cp->mask >> cp->shift) + 1;
                else
                        cp->hash = DEFAULT_HASH_SIZE;
        }

        if (!cp->perfect && !cp->h)
                cp->alloc_hash = cp->hash;

        /* Note: this could be as restrictive as if (handle & ~(mask >> shift))
         * but then, we'd fail handles that may become valid after some future
         * mask change. While this is extremely unlikely to ever matter,
         * the check below is safer (and also more backwards-compatible).
         */
        if (cp->perfect || valid_perfect_hash(cp))
                if (handle >= cp->alloc_hash)
                        goto errout_alloc;


        err = -ENOMEM;
        if (!cp->perfect && !cp->h) {
                if (valid_perfect_hash(cp)) {
                        if (tcindex_alloc_perfect_hash(net, cp) < 0)
                                goto errout_alloc;
                        balloc = 1;
                } else {
                        struct tcindex_filter __rcu **hash;

                        hash = kcalloc(cp->hash,
                                       sizeof(struct tcindex_filter *),
                                       GFP_KERNEL);

                        if (!hash)
                                goto errout_alloc;

                        cp->h = hash;
                        balloc = 2;
                }
        }

        if (cp->perfect)
                r = cp->perfect + handle;
        else
                r = tcindex_lookup(cp, handle) ? : &new_filter_result;

        if (r == &new_filter_result) {
                f = kzalloc(sizeof(*f), GFP_KERNEL);
                if (!f)
                        goto errout_alloc;
                f->key = handle;
                f->next = NULL;
                err = tcindex_filter_result_init(&f->result, net);
                if (err < 0) {
                        kfree(f);
                        goto errout_alloc;
                }
        }

        if (tb[TCA_TCINDEX_CLASSID]) {
                cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
                tcf_bind_filter(tp, &cr, base);
        }

        if (old_r && old_r != r) {
                err = tcindex_filter_result_init(old_r, net);
                if (err < 0) {
                        kfree(f);
                        goto errout_alloc;
                }
        }

        oldp = p;
        r->res = cr;
        tcf_exts_change(&r->exts, &e);

        rcu_assign_pointer(tp->root, cp);

        if (r == &new_filter_result) {
                struct tcindex_filter *nfp;
                struct tcindex_filter __rcu **fp;

                f->result.res = r->res;
                tcf_exts_change(&f->result.exts, &r->exts);

                fp = cp->h + (handle % cp->hash);
                for (nfp = rtnl_dereference(*fp);
                     nfp;
                     fp = &nfp->next, nfp = rtnl_dereference(*fp))
                                ; /* nothing */

                rcu_assign_pointer(*fp, f);
        } else {
                tcf_exts_destroy(&new_filter_result.exts);
        }

        if (oldp)
                tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
        return 0;

errout_alloc:
        if (balloc == 1)
                tcindex_free_perfect_hash(cp);
        else if (balloc == 2)
                kfree(cp->h);
errout1:
        tcf_exts_destroy(&new_filter_result.exts);
errout:
        kfree(cp);
        tcf_exts_destroy(&e);
        return err;
}
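
/*
 * To recap the update scheme used above: tcindex_set_parms() builds a fresh
 * tcindex_data copy (cp), applies the new attributes to it, publishes it with
 * rcu_assign_pointer(tp->root, cp), and only then queues the old copy for
 * freeing via tcindex_partial_destroy_work().  Readers in tcindex_classify()
 * therefore always observe either the complete old or the complete new
 * configuration, never a half-updated one.
 */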

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
               struct tcf_proto *tp, unsigned long base, u32 handle,
               struct nlattr **tca, void **arg, bool ovr,
               bool rtnl_held, struct netlink_ext_ack *extack)
{
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_TCINDEX_MAX + 1];
        struct tcindex_data *p = rtnl_dereference(tp->root);
        struct tcindex_filter_result *r = *arg;
        int err;

        pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
            "p %p,r %p,*arg %p\n",
            tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);

        if (!opt)
                return 0;

        err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
                                          tcindex_policy, NULL);
        if (err < 0)
                return err;

        return tcindex_set_parms(net, tp, base, handle, p, r, tb,
                                 tca[TCA_RATE], ovr, extack);
}

static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
                         bool rtnl_held)
{
        struct tcindex_data *p = rtnl_dereference(tp->root);
        struct tcindex_filter *f, *next;
        int i;

        pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
        if (p->perfect) {
                for (i = 0; i < p->hash; i++) {
                        if (!p->perfect[i].res.class)
                                continue;
                        if (walker->count >= walker->skip) {
                                if (walker->fn(tp, p->perfect + i, walker) < 0) {
                                        walker->stop = 1;
                                        return;
                                }
                        }
                        walker->count++;
                }
        }
        if (!p->h)
                return;
        for (i = 0; i < p->hash; i++) {
                for (f = rtnl_dereference(p->h[i]); f; f = next) {
                        next = rtnl_dereference(f->next);
                        if (walker->count >= walker->skip) {
                                if (walker->fn(tp, &f->result, walker) < 0) {
                                        walker->stop = 1;
                                        return;
                                }
                        }
                        walker->count++;
                }
        }
}

static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
                            struct netlink_ext_ack *extack)
{
        struct tcindex_data *p = rtnl_dereference(tp->root);
        int i;

        pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

        if (p->perfect) {
                for (i = 0; i < p->hash; i++) {
                        struct tcindex_filter_result *r = p->perfect + i;

                        tcf_unbind_filter(tp, &r->res);
                        if (tcf_exts_get_net(&r->exts))
                                tcf_queue_work(&r->rwork,
                                               tcindex_destroy_rexts_work);
                        else
                                __tcindex_destroy_rexts(r);
                }
        }

        for (i = 0; p->h && i < p->hash; i++) {
                struct tcindex_filter *f, *next;
                bool last;

                for (f = rtnl_dereference(p->h[i]); f; f = next) {
                        next = rtnl_dereference(f->next);
                        tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
                }
        }

        tcf_queue_work(&p->rwork, tcindex_destroy_work);
}


static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
                        struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
        struct tcindex_data *p = rtnl_dereference(tp->root);
        struct tcindex_filter_result *r = fh;
        struct nlattr *nest;

        pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
                 tp, fh, skb, t, p, r);
        pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (!fh) {
                t->tcm_handle = ~0; /* whatever ... */
                if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
                    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
                    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
                    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
                        goto nla_put_failure;
                nla_nest_end(skb, nest);
        } else {
                if (p->perfect) {
                        t->tcm_handle = r - p->perfect;
                } else {
                        struct tcindex_filter *f;
                        struct tcindex_filter __rcu **fp;
                        int i;

                        t->tcm_handle = 0;
                        for (i = 0; !t->tcm_handle && i < p->hash; i++) {
                                fp = &p->h[i];
                                for (f = rtnl_dereference(*fp);
                                     !t->tcm_handle && f;
                                     fp = &f->next, f = rtnl_dereference(*fp)) {
                                        if (&f->result == r)
                                                t->tcm_handle = f->key;
                                }
                        }
                }
                pr_debug("handle = %d\n", t->tcm_handle);
                if (r->res.class &&
                    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
                        goto nla_put_failure;

                if (tcf_exts_dump(skb, &r->exts) < 0)
                        goto nla_put_failure;
                nla_nest_end(skb, nest);

                if (tcf_exts_dump_stats(skb, &r->exts) < 0)
                        goto nla_put_failure;
        }

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl)
{
        struct tcindex_filter_result *r = fh;

        if (r && r->res.classid == classid)
                r->res.class = cl;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
        .kind           =       "tcindex",
        .classify       =       tcindex_classify,
        .init           =       tcindex_init,
        .destroy        =       tcindex_destroy,
        .get            =       tcindex_get,
        .change         =       tcindex_change,
        .delete         =       tcindex_delete,
        .walk           =       tcindex_walk,
        .dump           =       tcindex_dump,
        .bind_class     =       tcindex_bind_class,
        .owner          =       THIS_MODULE,
};

static int __init init_tcindex(void)
{
        return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
        unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");