// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_police.c	Input police filter
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */
9 #include <linux/module.h>
10 #include <linux/types.h>
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/errno.h>
14 #include <linux/skbuff.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <net/act_api.h>
19 #include <net/netlink.h>
20 #include <net/pkt_cls.h>
21 #include <net/tc_act/tc_police.h>
23 /* Each policer is serialized by its individual spinlock */
/* Per-netns identifier for the police action's tc_action_net instance;
 * registered via police_net_ops below. */
25 static unsigned int police_net_id;
/* Forward declaration: the ops table is defined near the bottom of the file
 * but is needed by tcf_police_init(). */
26 static struct tc_action_ops act_police_ops;
/*
 * tcf_police_walker - enumerate/dump all police actions in this netns.
 * Thin wrapper: resolves the per-netns action table and delegates to
 * tcf_generic_walker().
 * NOTE(review): this view of the file is line-sampled; the function's
 * braces are missing here.
 */
28 static int tcf_police_walker(struct net *net, struct sk_buff *skb,
29 struct netlink_callback *cb, int type,
30 const struct tc_action_ops *ops,
31 struct netlink_ext_ack *extack)
33 struct tc_action_net *tn = net_generic(net, police_net_id);
35 return tcf_generic_walker(tn, skb, cb, type, ops, extack);
/* Netlink attribute policy for TCA_POLICE_*: the rate/peakrate tables must
 * be exactly TC_RTAB_SIZE bytes; AVRATE and RESULT are plain u32 values.
 * NOTE(review): the closing "};" is missing from this sampled view. */
38 static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
39 [TCA_POLICE_RATE] = { .len = TC_RTAB_SIZE },
40 [TCA_POLICE_PEAKRATE] = { .len = TC_RTAB_SIZE },
41 [TCA_POLICE_AVRATE] = { .type = NLA_U32 },
42 [TCA_POLICE_RESULT] = { .type = NLA_U32 },
/*
 * tcf_police_init - create or update a police action from netlink attributes.
 *
 * Parses the nested TCA_POLICE_* attributes, creates or looks up the action
 * in the per-netns IDR, builds a fresh tcf_police_params block, and publishes
 * it via RCU under police->tcf_lock so the datapath never sees a partially
 * updated configuration.
 *
 * NOTE(review): this view of the file is line-sampled — braces, several
 * error-handling branches and some statements are missing. The comments
 * below describe only what the visible lines do.
 */
45 static int tcf_police_init(struct net *net, struct nlattr *nla,
46 struct nlattr *est, struct tc_action **a,
47 int ovr, int bind, bool rtnl_held,
49 struct netlink_ext_ack *extack)
51 int ret = 0, tcfp_result = TC_ACT_OK, err, size;
52 struct nlattr *tb[TCA_POLICE_MAX + 1];
53 struct tcf_chain *goto_ch = NULL;
54 struct tc_police *parm;
55 struct tcf_police *police;
56 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
57 struct tc_action_net *tn = net_generic(net, police_net_id);
58 struct tcf_police_params *new;
/* Parse the nested attributes against police_policy. */
65 err = nla_parse_nested_deprecated(tb, TCA_POLICE_MAX, nla,
/* The TBF parameter blob is mandatory and must match one of the two
 * supported binary layouts (current tc_police or the compat variant). */
70 if (tb[TCA_POLICE_TBF] == NULL)
72 size = nla_len(tb[TCA_POLICE_TBF]);
73 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
76 parm = nla_data(tb[TCA_POLICE_TBF]);
/* Look up an existing action by index or reserve a new IDR slot. */
78 err = tcf_idr_check_alloc(tn, &index, a, bind);
86 ret = tcf_idr_create(tn, index, NULL, a,
87 &act_police_ops, bind, true);
89 tcf_idr_cleanup(tn, index);
/* Freshly created action: initialise its token-bucket spinlock. */
93 spin_lock_init(&(to_police(*a)->tcfp_lock));
95 tcf_idr_release(*a, bind);
/* Validate the control action; may take a reference on a goto chain. */
98 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
102 police = to_police(*a);
/* Resolve the rate (and optional peak-rate) tables supplied by userspace. */
103 if (parm->rate.rate) {
105 R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
109 if (parm->peakrate.rate) {
110 P_tab = qdisc_get_rtab(&parm->peakrate,
111 tb[TCA_POLICE_PEAKRATE], NULL);
/* Install/replace the rate estimator when estimator config was supplied. */
118 err = gen_replace_estimator(&police->tcf_bstats,
119 police->common.cpu_bstats,
120 &police->tcf_rate_est,
/* TCA_POLICE_AVRATE is only usable if an estimator is (or becomes)
 * active on this action. */
125 } else if (tb[TCA_POLICE_AVRATE] &&
126 (ret == ACT_P_CREATED ||
127 !gen_estimator_active(&police->tcf_rate_est))) {
/* Optional override of the "conforming packet" verdict; a goto-chain
 * verdict is rejected as a fallback result. */
132 if (tb[TCA_POLICE_RESULT]) {
133 tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
134 if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
135 NL_SET_ERR_MSG(extack,
136 "goto chain not allowed on fallback");
/* Allocate the parameter block that will be RCU-published. */
142 new = kzalloc(sizeof(*new), GFP_KERNEL);
143 if (unlikely(!new)) {
148 /* No failure allowed after this point */
149 new->tcfp_result = tcfp_result;
150 new->tcfp_mtu = parm->mtu;
/* If no MTU was given, derive a default from the rate table's cell size. */
151 if (!new->tcfp_mtu) {
154 new->tcfp_mtu = 255 << R_tab->rate.cell_log;
/* Precompute the rate configs; the rtabs are not needed afterwards. */
157 new->rate_present = true;
158 psched_ratecfg_precompute(&new->rate, &R_tab->rate, 0);
159 qdisc_put_rtab(R_tab);
161 new->rate_present = false;
164 new->peak_present = true;
165 psched_ratecfg_precompute(&new->peak, &P_tab->rate, 0);
166 qdisc_put_rtab(P_tab);
168 new->peak_present = false;
/* Burst is stored as nanoseconds' worth of tokens. */
171 new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
172 if (new->peak_present)
173 new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
176 if (tb[TCA_POLICE_AVRATE])
177 new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
/* Publish: reset the bucket state under tcfp_lock, then swap the params
 * pointer under tcf_lock so the datapath sees a consistent snapshot. */
179 spin_lock_bh(&police->tcf_lock);
180 spin_lock_bh(&police->tcfp_lock);
181 police->tcfp_t_c = ktime_get_ns();
182 police->tcfp_toks = new->tcfp_burst;
183 if (new->peak_present)
184 police->tcfp_ptoks = new->tcfp_mtu_ptoks;
185 spin_unlock_bh(&police->tcfp_lock);
186 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
187 rcu_swap_protected(police->params,
189 lockdep_is_held(&police->tcf_lock));
190 spin_unlock_bh(&police->tcf_lock);
/* Drop the goto-chain reference we no longer own after the swap. */
193 tcf_chain_put_by_act(goto_ch);
197 if (ret == ACT_P_CREATED)
198 tcf_idr_insert(tn, *a);
/* Error path: release rate tables, chain reference and the IDR ref. */
202 qdisc_put_rtab(P_tab);
203 qdisc_put_rtab(R_tab);
205 tcf_chain_put_by_act(goto_ch);
207 tcf_idr_release(*a, bind);
/*
 * tcf_police_act - per-packet policing decision (datapath hot path).
 *
 * Implements a token-bucket check: packets within the configured MTU that
 * find enough accumulated tokens (and, if configured, peak-rate tokens)
 * get the "conforming" result; otherwise the action's configured verdict
 * applies. Params are read under RCU; mutable bucket state (tcfp_t_c,
 * tcfp_toks, tcfp_ptoks) is serialized by tcfp_lock.
 *
 * NOTE(review): this view is line-sampled; braces and some fall-through
 * statements (e.g. the over-limit return path) are missing here.
 */
211 static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
212 struct tcf_result *res)
214 struct tcf_police *police = to_police(a);
215 struct tcf_police_params *p;
216 s64 now, toks, ptoks = 0;
/* Update last-use timestamp and per-CPU byte/packet counters. */
219 tcf_lastuse_update(&police->tcf_tm);
220 bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);
222 ret = READ_ONCE(police->tcf_action);
223 p = rcu_dereference_bh(police->params);
/* EWMA policing: consult the rate estimator when an average-rate cap
 * is configured. */
225 if (p->tcfp_ewma_rate) {
226 struct gnet_stats_rate_est64 sample;
228 if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
229 sample.bps >= p->tcfp_ewma_rate)
/* Packets larger than the policer MTU are always over-limit. */
233 if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
/* No rate configured: every in-MTU packet conforms. */
234 if (!p->rate_present) {
235 ret = p->tcfp_result;
/* Token-bucket arithmetic: accrue tokens for the elapsed time
 * (capped at burst), charge this packet, and conform only if both
 * the rate and peak buckets stay non-negative. */
239 now = ktime_get_ns();
240 spin_lock_bh(&police->tcfp_lock);
241 toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
242 if (p->peak_present) {
243 ptoks = toks + police->tcfp_ptoks;
244 if (ptoks > p->tcfp_mtu_ptoks)
245 ptoks = p->tcfp_mtu_ptoks;
246 ptoks -= (s64)psched_l2t_ns(&p->peak,
249 toks += police->tcfp_toks;
250 if (toks > p->tcfp_burst)
251 toks = p->tcfp_burst;
252 toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
253 if ((toks|ptoks) >= 0) {
/* Conforming: commit the new bucket state. */
254 police->tcfp_t_c = now;
255 police->tcfp_toks = toks;
256 police->tcfp_ptoks = ptoks;
257 spin_unlock_bh(&police->tcfp_lock);
258 ret = p->tcfp_result;
261 spin_unlock_bh(&police->tcfp_lock);
/* Over-limit accounting; drops are counted separately. */
265 qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
267 if (ret == TC_ACT_SHOT)
268 qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
/*
 * tcf_police_cleanup - teardown hook called when the action is destroyed.
 * Detaches the RCU-published params (no concurrent readers remain, hence
 * the unconditional rcu_dereference_protected).
 * NOTE(review): the line that actually frees `p` is missing from this
 * sampled view — presumably a kfree_rcu(); confirm against the full file.
 */
273 static void tcf_police_cleanup(struct tc_action *a)
275 struct tcf_police *police = to_police(a);
276 struct tcf_police_params *p;
278 p = rcu_dereference_protected(police->params, 1);
/*
 * tcf_police_stats_update - fold externally collected (e.g. hardware
 * offload) counters into the action's per-CPU stats and refresh lastuse.
 * NOTE(review): line-sampled view; the hw-conditional branch around the
 * cpu_bstats_hw update is incomplete here.
 */
283 static void tcf_police_stats_update(struct tc_action *a,
284 u64 bytes, u32 packets,
285 u64 lastuse, bool hw)
287 struct tcf_police *police = to_police(a);
288 struct tcf_t *tm = &police->tcf_tm;
290 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
292 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
/* Never move lastuse backwards. */
294 tm->lastuse = max_t(u64, tm->lastuse, lastuse);
/*
 * tcf_police_dump - serialize the action's configuration to netlink.
 *
 * Snapshots the current params under tcf_lock (which also protects the
 * params pointer for writers) and emits TBF parameters, optional RESULT
 * and AVRATE attributes, plus the usage timestamps.
 *
 * NOTE(review): line-sampled view; the signature's bind/ref parameters,
 * struct tcf_t declaration, trailer (skb length return) and the
 * nla_put_failure label/trim are missing here.
 */
297 static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
/* Remember the tail so a partial dump can be trimmed on failure. */
300 unsigned char *b = skb_tail_pointer(skb);
301 struct tcf_police *police = to_police(a);
302 struct tcf_police_params *p;
303 struct tc_police opt = {
304 .index = police->tcf_index,
/* Exclude the dumper's own references from the reported counts. */
305 .refcnt = refcount_read(&police->tcf_refcnt) - ref,
306 .bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
310 spin_lock_bh(&police->tcf_lock);
311 opt.action = police->tcf_action;
312 p = rcu_dereference_protected(police->params,
313 lockdep_is_held(&police->tcf_lock));
314 opt.mtu = p->tcfp_mtu;
315 opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
/* Convert internal rate configs back to the userspace tc_ratespec form. */
317 psched_ratecfg_getrate(&opt.rate, &p->rate);
319 psched_ratecfg_getrate(&opt.peakrate, &p->peak);
320 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
321 goto nla_put_failure;
322 if (p->tcfp_result &&
323 nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
324 goto nla_put_failure;
325 if (p->tcfp_ewma_rate &&
326 nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
327 goto nla_put_failure;
/* Timestamps are reported as clock_t deltas relative to now. */
329 t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
330 t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
331 t.firstuse = jiffies_to_clock_t(jiffies - police->tcf_tm.firstuse);
332 t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
333 if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
334 goto nla_put_failure;
335 spin_unlock_bh(&police->tcf_lock);
/* nla_put_failure path: unlock before trimming the skb. */
340 spin_unlock_bh(&police->tcf_lock);
/*
 * tcf_police_search - .lookup hook: find a police action by index in the
 * per-netns IDR. NOTE(review): braces missing from this sampled view.
 */
345 static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
347 struct tc_action_net *tn = net_generic(net, police_net_id);
349 return tcf_idr_search(tn, a, index);
/* Module metadata (author/description/license). */
352 MODULE_AUTHOR("Alexey Kuznetsov");
353 MODULE_DESCRIPTION("Policing actions");
354 MODULE_LICENSE("GPL");
/* tc action ops table wiring the police callbacks into the act_api core.
 * NOTE(review): line-sampled view — the .kind/.id (and possibly .type)
 * initializers are missing here. */
356 static struct tc_action_ops act_police_ops = {
359 .owner = THIS_MODULE,
360 .stats_update = tcf_police_stats_update,
361 .act = tcf_police_act,
362 .dump = tcf_police_dump,
363 .init = tcf_police_init,
364 .walk = tcf_police_walker,
365 .lookup = tcf_police_search,
366 .cleanup = tcf_police_cleanup,
367 .size = sizeof(struct tcf_police),
/*
 * police_init_net - per-netns setup: initialise this netns's action table.
 * NOTE(review): braces missing from this sampled view.
 */
370 static __net_init int police_init_net(struct net *net)
372 struct tc_action_net *tn = net_generic(net, police_net_id);
374 return tc_action_net_init(net, tn, &act_police_ops);
/*
 * police_exit_net - batched per-netns teardown of the action tables.
 * NOTE(review): braces missing from this sampled view.
 */
377 static void __net_exit police_exit_net(struct list_head *net_list)
379 tc_action_net_exit(net_list, police_net_id);
/* Pernet operations: allocate a tc_action_net per netns and record its id
 * in police_net_id. NOTE(review): closing "};" missing from this view. */
382 static struct pernet_operations police_net_ops = {
383 .init = police_init_net,
384 .exit_batch = police_exit_net,
385 .id = &police_net_id,
386 .size = sizeof(struct tc_action_net),
/* Module entry: register the police action ops and pernet hooks.
 * NOTE(review): braces missing from this sampled view. */
389 static int __init police_init_module(void)
391 return tcf_register_action(&act_police_ops, &police_net_ops);
/* Module exit: unregister the action ops and pernet hooks.
 * NOTE(review): braces missing from this sampled view. */
394 static void __exit police_cleanup_module(void)
396 tcf_unregister_action(&act_police_ops, &police_net_ops);
/* Hook module entry/exit points into the kernel module loader. */
399 module_init(police_init_module);
400 module_exit(police_cleanup_module);