// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2013 Cisco Systems, Inc, 2013.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * RFC 8033: https://tools.ietf.org/html/rfc8033
 */
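
/* Example usage (iproute2 syntax; an illustrative sketch only, the exact
 * option names depend on the tc version in use):
 *
 *	tc qdisc add dev eth0 root pie limit 1000 target 15ms tupdate 15ms ecn
 */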

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/pie.h>

/* private data for the Qdisc */
struct pie_sched_data {
	struct pie_vars vars;
	struct pie_params params;
	struct pie_stats stats;
	struct timer_list adapt_timer;
	struct Qdisc *sch;
};

bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
		    struct pie_vars *vars, u32 qlen, u32 packet_size)
{
	u64 rnd;
	u64 local_prob = vars->prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left, skip random early drop */
	if (vars->burst_time > 0)
		return false;

	/* If current delay is less than half of target, and
	 * if drop prob is low already, disable early_drop
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable pie_drop_early,
	 * similar to min_th in RED
	 */
	if (qlen < 2 * mtu)
		return false;

	/* If bytemode is turned on, use packet size to compute new
	 * probability. Smaller packets will have lower drop prob in this case
	 */
	if (params->bytemode && packet_size <= mtu)
		local_prob = (u64)packet_size * div_u64(local_prob, mtu);
	else
		local_prob = vars->prob;

	if (local_prob == 0) {
		vars->accu_prob = 0;
		vars->accu_prob_overflows = 0;
	}

	if (local_prob > MAX_PROB - vars->accu_prob)
		vars->accu_prob_overflows++;

	vars->accu_prob += local_prob;
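
	/* Derandomization as described in RFC 8033: never drop while the
	 * accumulated probability is below 0.85 (scaled to MAX_PROB), and
	 * force a drop once it exceeds 8.5. accu_prob is a wrapping u64
	 * counter, so whole multiples of MAX_PROB are tracked separately in
	 * accu_prob_overflows (8 overflows + MAX_PROB / 2 ~= 8.5).
	 */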
	if (vars->accu_prob_overflows == 0 &&
	    vars->accu_prob < (MAX_PROB / 100) * 85)
		return false;
	if (vars->accu_prob_overflows == 8 &&
	    vars->accu_prob >= MAX_PROB / 2)
		return true;

	prandom_bytes(&rnd, 8);
	if (rnd < local_prob) {
		vars->accu_prob = 0;
		vars->accu_prob_overflows = 0;
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(pie_drop_early);

static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

	if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
			    skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than 10%, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	q->vars.accu_prob = 0;
	q->vars.accu_prob_overflows = 0;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET] = {.type = NLA_U32},
	[TCA_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_PIE_BETA] = {.type = NLA_U32},
	[TCA_PIE_ECN] = {.type = NLA_U32},
	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
	[TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		q->params.tupdate =
			usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		q->params.limit = limit;
		sch->limit = limit;
	}

	if (tb[TCA_PIE_ALPHA])
		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

	if (tb[TCA_PIE_BETA])
		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

	if (tb[TCA_PIE_ECN])
		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

	if (tb[TCA_PIE_BYTEMODE])
		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

	if (tb[TCA_PIE_DQ_RATE_ESTIMATOR])
		q->params.dq_rate_estimator =
			nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]);

	/* Drop excess packets if new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
			 struct pie_vars *vars, u32 qlen)
{
	psched_time_t now = psched_get_time();
	u32 dtime = 0;

	/* If dq_rate_estimator is disabled, calculate qdelay using the
	 * packet timestamp.
	 */
	if (!params->dq_rate_estimator) {
		vars->qdelay = now - pie_get_enqueue_time(skb);

		if (vars->dq_tstamp != DTIME_INVALID)
			dtime = now - vars->dq_tstamp;

		vars->dq_tstamp = now;

		if (qlen == 0)
			vars->qdelay = 0;

		if (dtime == 0)
			return;

		goto burst_allowance_reduction;
	}

	/* If current queue is about 10 packets or more and dq_count is unset
	 * we have enough packets to calculate the drain rate. Save
	 * current time as dq_tstamp and start measurement cycle.
	 */
	if (qlen >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
		vars->dq_tstamp = psched_get_time();
		vars->dq_count = 0;
	}

	/* Calculate the average drain rate from this value. If queue length
	 * has receded to a small value viz., <= QUEUE_THRESHOLD bytes, reset
	 * the dq_count to -1 as we don't have enough packets to calculate the
	 * drain rate anymore. The following if block is entered only when we
	 * have a substantial queue built up (QUEUE_THRESHOLD bytes or more)
	 * and we calculate the drain rate for the threshold here. dq_count is
	 * in bytes, time difference in psched_time, hence rate is in
	 * bytes/psched_time.
	 */
	if (vars->dq_count != DQCOUNT_INVALID) {
		vars->dq_count += skb->len;

		if (vars->dq_count >= QUEUE_THRESHOLD) {
			u32 count = vars->dq_count << PIE_SCALE;

			dtime = now - vars->dq_tstamp;

			if (dtime == 0)
				return;

			count = count / dtime;
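
			/* Fold the new sample into the running average with
			 * an EWMA of weight 1/8:
			 * avg_dq_rate = 7/8 * avg_dq_rate + 1/8 * count
			 */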
			if (vars->avg_dq_rate == 0)
				vars->avg_dq_rate = count;
			else
				vars->avg_dq_rate =
				    (vars->avg_dq_rate -
				     (vars->avg_dq_rate >> 3)) + (count >> 3);

			/* If the queue has receded below the threshold, we hold
			 * on to the last drain rate calculated, else we reset
			 * dq_count to 0 to re-enter the if block when the next
			 * packet is dequeued
			 */
			if (qlen < QUEUE_THRESHOLD) {
				vars->dq_count = DQCOUNT_INVALID;
			} else {
				vars->dq_count = 0;
				vars->dq_tstamp = psched_get_time();
			}

			goto burst_allowance_reduction;
		}
	}

	return;

burst_allowance_reduction:
	if (vars->burst_time > 0) {
		if (vars->burst_time > dtime)
			vars->burst_time -= dtime;
		else
			vars->burst_time = 0;
	}
}
EXPORT_SYMBOL_GPL(pie_process_dequeue);

void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
			       u32 qlen)
{
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = 0;	/* in pschedtime */
	s64 delta = 0;		/* determines the change in probability */
	u64 oldprob;
	u64 alpha, beta;
	u32 power;
	bool update_prob = true;

	if (params->dq_rate_estimator) {
		qdelay_old = vars->qdelay;
		vars->qdelay_old = vars->qdelay;

		if (vars->avg_dq_rate > 0)
			qdelay = (qlen << PIE_SCALE) / vars->avg_dq_rate;
		else
			qdelay = 0;
	} else {
		qdelay = vars->qdelay;
		qdelay_old = vars->qdelay_old;
	}

	/* If qdelay is zero and qlen is not, it means qlen is very small,
	 * so we do not update probability in this round.
	 */
	if (qdelay == 0 && qlen != 0)
		update_prob = false;

	/* In the algorithm, alpha and beta are between 0 and 2 with typical
	 * value for alpha as 0.125. In this implementation, we use values 0-32
	 * passed from user space to represent this. Also, alpha and beta have
	 * unit of HZ and need to be scaled before they can be used to update
	 * probability. alpha/beta are updated locally below by scaling down
	 * by 16 to come to 0-2 range.
	 */
	alpha = ((u64)params->alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	beta = ((u64)params->beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
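
	/* Worked example: the typical alpha of 0.125 arrives from user space
	 * as 2 (0-32 range, multiples of 1/16), so the scaling above yields
	 * alpha = 0.125 * MAX_PROB / PSCHED_TICKS_PER_SEC.
	 */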

	/* We scale alpha and beta differently depending on how heavy the
	 * congestion is. Please see RFC 8033 for details.
	 */
	if (vars->prob < MAX_PROB / 10) {
		alpha >>= 1;
		beta >>= 1;

		power = 100;
		while (vars->prob < div_u64(MAX_PROB, power) &&
		       power <= 1000000) {
			alpha >>= 2;
			beta >>= 2;
			power *= 10;
		}
	}

	/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
	delta += alpha * (u64)(qdelay - params->target);
	delta += beta * (u64)(qdelay - qdelay_old);

	oldprob = vars->prob;

	/* to ensure we increase probability in steps of no more than 2% */
	if (delta > (s64)(MAX_PROB / (100 / 2)) &&
	    vars->prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;

	/* Non-linear drop:
	 * Tune drop probability to increase quickly for high delays (>= 250ms)
	 * 250ms is derived through experiments and provides error protection
	 */
	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	vars->prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (vars->prob < oldprob) {
			vars->prob = MAX_PROB;
			/* Prevent normalization error. If probability is at
			 * maximum value already, we normalize it here, and
			 * skip the check to do a non-linear drop in the next
			 * section.
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (vars->prob > oldprob)
			vars->prob = 0;
	}

	/* Non-linear drop in probability: Reduce drop probability quickly if
	 * delay is 0 for 2 consecutive Tupdate periods.
	 */
	if (qdelay == 0 && qdelay_old == 0 && update_prob)
		/* Reduce drop probability to 98.4% of its current value */
		vars->prob -= vars->prob / 64;

	vars->qdelay = qdelay;
	vars->qlen_old = qlen;

	/* We restart the measurement cycle if the following conditions are met
	 * 1. If the delay has been low for 2 consecutive Tupdate periods
	 * 2. Calculated drop probability is zero
	 * 3. If dq_rate_estimator is enabled, we have at least one estimate
	 *    for the avg_dq_rate, i.e., it is a non-zero value
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->qdelay_old < params->target / 2) &&
	    vars->prob == 0 &&
	    (!params->dq_rate_estimator || vars->avg_dq_rate > 0)) {
		pie_vars_init(vars);
	}

	if (!params->dq_rate_estimator)
		vars->qdelay_old = qdelay;
}
EXPORT_SYMBOL_GPL(pie_calculate_probability);

static void pie_timer(struct timer_list *t)
{
	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);

	/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	q->sch = sch;
	timer_setup(&q->adapt_timer, pie_timer, 0);

	if (opt) {
		int err = pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(q->params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE,
			jiffies_to_usecs(q->params.tupdate)) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
	    nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
	    nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode) ||
	    nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR,
			q->params.dq_rate_estimator))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct tc_pie_xstats st = {
		.prob = q->vars.prob,
		.delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
			 NSEC_PER_USEC,
		.packets_in = q->stats.packets_in,
		.overlimit = q->stats.overlimit,
		.maxq = q->stats.maxq,
		.dropped = q->stats.dropped,
		.ecn_mark = q->stats.ecn_mark,
	};

	/* avg_dq_rate is only valid if dq_rate_estimator is enabled */
	st.dq_rate_estimating = q->params.dq_rate_estimator;

	/* unscale and return dq_rate in bytes per sec */
	if (q->params.dq_rate_estimator)
		st.avg_dq_rate = q->vars.avg_dq_rate *
				 (PSCHED_TICKS_PER_SEC) >> PIE_SCALE;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = qdisc_dequeue_head(sch);

	if (!skb)
		return NULL;

	pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	q->params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id = "pie",
	.priv_size = sizeof(struct pie_sched_data),
	.enqueue = pie_qdisc_enqueue,
	.dequeue = pie_qdisc_dequeue,
	.peek = qdisc_peek_dequeued,
	.init = pie_init,
	.destroy = pie_destroy,
	.reset = pie_reset,
	.change = pie_change,
	.dump = pie_dump,
	.dump_stats = pie_dump_stats,
	.owner = THIS_MODULE,
};

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");