diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index b0b0dc46af61e78e9e209660852138fff1101cfd..915bcdb59a9f685db85f4917de639e68aaca7a4a 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
 #include <linux/skbuff.h>
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
-
-#define QUEUE_THRESHOLD 16384
-#define DQCOUNT_INVALID -1
-#define DTIME_INVALID 0xffffffffffffffff
-#define MAX_PROB 0xffffffffffffffff
-#define PIE_SCALE 8
-
-/* parameters used */
-struct pie_params {
-       psched_time_t target;   /* user specified target delay in pschedtime */
-       u32 tupdate;            /* timer frequency (in jiffies) */
-       u32 limit;              /* number of packets that can be enqueued */
-       u32 alpha;              /* alpha and beta are between 0 and 32 */
-       u32 beta;               /* and are used for shift relative to 1 */
-       bool ecn;               /* true if ecn is enabled */
-       bool bytemode;          /* to scale drop early prob based on pkt size */
-       u8 dq_rate_estimator;   /* to calculate delay using Little's law */
-};
-
-/* variables used */
-struct pie_vars {
-       u64 prob;               /* probability but scaled by u64 limit. */
-       psched_time_t burst_time;
-       psched_time_t qdelay;
-       psched_time_t qdelay_old;
-       u64 dq_count;           /* measured in bytes */
-       psched_time_t dq_tstamp;        /* drain rate */
-       u64 accu_prob;          /* accumulated drop probability */
-       u32 avg_dq_rate;        /* bytes per pschedtime tick,scaled */
-       u32 qlen_old;           /* in bytes */
-       u8 accu_prob_overflows; /* overflows of accu_prob */
-};
-
-/* statistics gathering */
-struct pie_stats {
-       u32 packets_in;         /* total number of packets enqueued */
-       u32 dropped;            /* packets dropped due to pie_action */
-       u32 overlimit;          /* dropped due to lack of space in queue */
-       u32 maxq;               /* maximum queue size */
-       u32 ecn_mark;           /* packets marked with ECN */
-};
+#include <net/pie.h>
 
 /* private data for the Qdisc */
 struct pie_sched_data {
-       struct pie_params params;
        struct pie_vars vars;
+       struct pie_params params;
        struct pie_stats stats;
        struct timer_list adapt_timer;
        struct Qdisc *sch;
 };
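The structures and constants deleted above now live in the shared header net/pie.h, so other qdiscs can embed the same PIE state while sch_pie keeps only its private wrapper. A minimal sketch of another consumer, assuming only what net/pie.h now exports (my_sched_data is an illustrative name):

#include <net/pie.h>

/* illustrative only: any qdisc can now embed the shared PIE state
 * rather than redefining it
 */
struct my_sched_data {
	struct pie_params params;	/* tunables: target, tupdate, alpha, ... */
	struct pie_vars vars;		/* run-time state: prob, qdelay, ... */
	struct pie_stats stats;		/* counters: packets_in, dropped, ... */
};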
 
-static void pie_params_init(struct pie_params *params)
+bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
+                   struct pie_vars *vars, u32 qlen, u32 packet_size)
 {
-       params->alpha = 2;
-       params->beta = 20;
-       params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC); /* 15 ms */
-       params->limit = 1000;   /* default of 1000 packets */
-       params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC);   /* 15 ms */
-       params->ecn = false;
-       params->bytemode = false;
-       params->dq_rate_estimator = false;
-}
-
-/* private skb vars */
-struct pie_skb_cb {
-       psched_time_t enqueue_time;
-};
-
-static struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb)
-{
-       qdisc_cb_private_validate(skb, sizeof(struct pie_skb_cb));
-       return (struct pie_skb_cb *)qdisc_skb_cb(skb)->data;
-}
-
-static psched_time_t pie_get_enqueue_time(const struct sk_buff *skb)
-{
-       return get_pie_cb(skb)->enqueue_time;
-}
-
-static void pie_set_enqueue_time(struct sk_buff *skb)
-{
-       get_pie_cb(skb)->enqueue_time = psched_get_time();
-}
-
-static void pie_vars_init(struct pie_vars *vars)
-{
-       vars->dq_count = DQCOUNT_INVALID;
-       vars->dq_tstamp = DTIME_INVALID;
-       vars->accu_prob = 0;
-       vars->avg_dq_rate = 0;
-       /* default of 150 ms in pschedtime */
-       vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC);
-       vars->accu_prob_overflows = 0;
-}
-
-static bool drop_early(struct Qdisc *sch, u32 packet_size)
-{
-       struct pie_sched_data *q = qdisc_priv(sch);
        u64 rnd;
-       u64 local_prob = q->vars.prob;
+       u64 local_prob = vars->prob;
        u32 mtu = psched_mtu(qdisc_dev(sch));
 
        /* If there is still burst allowance left, skip random early drop */
-       if (q->vars.burst_time > 0)
+       if (vars->burst_time > 0)
                return false;
 
        /* If current delay is less than half of target, and
         * if drop prob is low already, disable early_drop
         */
-       if ((q->vars.qdelay < q->params.target / 2) &&
-           (q->vars.prob < MAX_PROB / 5))
+       if ((vars->qdelay < params->target / 2) &&
+           (vars->prob < MAX_PROB / 5))
                return false;
 
-       /* If we have fewer than 2 mtu-sized packets, disable drop_early,
+       /* If we have fewer than 2 mtu-sized packets, disable pie_drop_early,
         * similar to min_th in RED
         */
-       if (sch->qstats.backlog < 2 * mtu)
+       if (qlen < 2 * mtu)
                return false;
 
        /* If bytemode is turned on, use packet size to compute new
         * probability. Smaller packets will have lower drop prob in this case
         */
-       if (q->params.bytemode && packet_size <= mtu)
+       if (params->bytemode && packet_size <= mtu)
                local_prob = (u64)packet_size * div_u64(local_prob, mtu);
        else
-               local_prob = q->vars.prob;
+               local_prob = vars->prob;
 
        if (local_prob == 0) {
-               q->vars.accu_prob = 0;
-               q->vars.accu_prob_overflows = 0;
+               vars->accu_prob = 0;
+               vars->accu_prob_overflows = 0;
        }
 
-       if (local_prob > MAX_PROB - q->vars.accu_prob)
-               q->vars.accu_prob_overflows++;
+       if (local_prob > MAX_PROB - vars->accu_prob)
+               vars->accu_prob_overflows++;
 
-       q->vars.accu_prob += local_prob;
+       vars->accu_prob += local_prob;
 
-       if (q->vars.accu_prob_overflows == 0 &&
-           q->vars.accu_prob < (MAX_PROB / 100) * 85)
+       if (vars->accu_prob_overflows == 0 &&
+           vars->accu_prob < (MAX_PROB / 100) * 85)
                return false;
-       if (q->vars.accu_prob_overflows == 8 &&
-           q->vars.accu_prob >= MAX_PROB / 2)
+       if (vars->accu_prob_overflows == 8 &&
+           vars->accu_prob >= MAX_PROB / 2)
                return true;
 
        prandom_bytes(&rnd, 8);
        if (rnd < local_prob) {
-               q->vars.accu_prob = 0;
-               q->vars.accu_prob_overflows = 0;
+               vars->accu_prob = 0;
+               vars->accu_prob_overflows = 0;
                return true;
        }
 
        return false;
 }
+EXPORT_SYMBOL_GPL(pie_drop_early);
 
 static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
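Note the new signature: pie_drop_early() now takes the params, vars and queue length explicitly instead of digging them out of pie_sched_data, so any caller holding its own PIE state can use it. A sketch of such a call site, reusing the hypothetical my_sched_data from above (sch_pie's real call site appears in the next hunk):

static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct my_sched_data *q = qdisc_priv(sch);

	/* consult the shared PIE logic before queueing */
	if (pie_drop_early(sch, &q->params, &q->vars,
			   sch->qstats.backlog, skb->len))
		return qdisc_drop(skb, sch, to_free);

	return qdisc_enqueue_tail(skb, sch);
}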
@@ -184,7 +101,8 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                goto out;
        }
 
-       if (!drop_early(sch, skb->len)) {
+       if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
+                           skb->len)) {
                enqueue = true;
        } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
                   INET_ECN_set_ce(skb)) {
@@ -216,14 +134,14 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 }
 
 static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
-       [TCA_PIE_TARGET] = {.type = NLA_U32},
-       [TCA_PIE_LIMIT] = {.type = NLA_U32},
-       [TCA_PIE_TUPDATE] = {.type = NLA_U32},
-       [TCA_PIE_ALPHA] = {.type = NLA_U32},
-       [TCA_PIE_BETA] = {.type = NLA_U32},
-       [TCA_PIE_ECN] = {.type = NLA_U32},
-       [TCA_PIE_BYTEMODE] = {.type = NLA_U32},
-       [TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
+       [TCA_PIE_TARGET]                = {.type = NLA_U32},
+       [TCA_PIE_LIMIT]                 = {.type = NLA_U32},
+       [TCA_PIE_TUPDATE]               = {.type = NLA_U32},
+       [TCA_PIE_ALPHA]                 = {.type = NLA_U32},
+       [TCA_PIE_BETA]                  = {.type = NLA_U32},
+       [TCA_PIE_ECN]                   = {.type = NLA_U32},
+       [TCA_PIE_BYTEMODE]              = {.type = NLA_U32},
+       [TCA_PIE_DQ_RATE_ESTIMATOR]     = {.type = NLA_U32},
 };
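The policy change above is whitespace-only, tab-aligning the entries; every PIE attribute remains an NLA_U32. For context, pie_change() (its body falls between the hunks shown here) consumes these with nla_get_u32(); roughly, and reconstructed here for illustration:

	/* excerpt, reconstructed: inside pie_change() after nla_parse */
	if (tb[TCA_PIE_TARGET]) {
		/* user space passes the target delay in microseconds */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}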
 
 static int pie_change(struct Qdisc *sch, struct nlattr *opt,
@@ -296,26 +214,25 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
        return 0;
 }
 
-static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
+void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
+                        struct pie_vars *vars, u32 qlen)
 {
-       struct pie_sched_data *q = qdisc_priv(sch);
-       int qlen = sch->qstats.backlog; /* current queue size in bytes */
        psched_time_t now = psched_get_time();
        u32 dtime = 0;
 
        /* If dq_rate_estimator is disabled, calculate qdelay using the
         * packet timestamp.
         */
-       if (!q->params.dq_rate_estimator) {
-               q->vars.qdelay = now - pie_get_enqueue_time(skb);
+       if (!params->dq_rate_estimator) {
+               vars->qdelay = now - pie_get_enqueue_time(skb);
 
-               if (q->vars.dq_tstamp != DTIME_INVALID)
-                       dtime = now - q->vars.dq_tstamp;
+               if (vars->dq_tstamp != DTIME_INVALID)
+                       dtime = now - vars->dq_tstamp;
 
-               q->vars.dq_tstamp = now;
+               vars->dq_tstamp = now;
 
                if (qlen == 0)
-                       q->vars.qdelay = 0;
+                       vars->qdelay = 0;
 
                if (dtime == 0)
                        return;
@@ -327,39 +244,39 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
         * we have enough packets to calculate the drain rate. Save
         * current time as dq_tstamp and start measurement cycle.
         */
-       if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) {
-               q->vars.dq_tstamp = psched_get_time();
-               q->vars.dq_count = 0;
+       if (qlen >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
+               vars->dq_tstamp = psched_get_time();
+               vars->dq_count = 0;
        }
 
-       /* Calculate the average drain rate from this value.  If queue length
-        * has receded to a small value viz., <= QUEUE_THRESHOLD bytes,reset
+       /* Calculate the average drain rate from this value. If queue length
+        * has receded to a small value viz., <= QUEUE_THRESHOLD bytes, reset
         * the dq_count to -1 as we don't have enough packets to calculate the
-        * drain rate anymore The following if block is entered only when we
+        * drain rate anymore. The following if block is entered only when we
         * have a substantial queue built up (QUEUE_THRESHOLD bytes or more)
         * and we calculate the drain rate for the threshold here.  dq_count is
         * in bytes, time difference in psched_time, hence rate is in
         * bytes/psched_time.
         */
-       if (q->vars.dq_count != DQCOUNT_INVALID) {
-               q->vars.dq_count += skb->len;
+       if (vars->dq_count != DQCOUNT_INVALID) {
+               vars->dq_count += skb->len;
 
-               if (q->vars.dq_count >= QUEUE_THRESHOLD) {
-                       u32 count = q->vars.dq_count << PIE_SCALE;
+               if (vars->dq_count >= QUEUE_THRESHOLD) {
+                       u32 count = vars->dq_count << PIE_SCALE;
 
-                       dtime = now - q->vars.dq_tstamp;
+                       dtime = now - vars->dq_tstamp;
 
                        if (dtime == 0)
                                return;
 
                        count = count / dtime;
 
-                       if (q->vars.avg_dq_rate == 0)
-                               q->vars.avg_dq_rate = count;
+                       if (vars->avg_dq_rate == 0)
+                               vars->avg_dq_rate = count;
                        else
-                               q->vars.avg_dq_rate =
-                                   (q->vars.avg_dq_rate -
-                                    (q->vars.avg_dq_rate >> 3)) + (count >> 3);
+                               vars->avg_dq_rate =
+                                   (vars->avg_dq_rate -
+                                    (vars->avg_dq_rate >> 3)) + (count >> 3);
 
                        /* If the queue has receded below the threshold, we hold
                         * on to the last drain rate calculated, else we reset
@@ -367,10 +284,10 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
                         * packet is dequeued
                         */
                        if (qlen < QUEUE_THRESHOLD) {
-                               q->vars.dq_count = DQCOUNT_INVALID;
+                               vars->dq_count = DQCOUNT_INVALID;
                        } else {
-                               q->vars.dq_count = 0;
-                               q->vars.dq_tstamp = psched_get_time();
+                               vars->dq_count = 0;
+                               vars->dq_tstamp = psched_get_time();
                        }
 
                        goto burst_allowance_reduction;
@@ -380,18 +297,18 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
        return;
 
 burst_allowance_reduction:
-       if (q->vars.burst_time > 0) {
-               if (q->vars.burst_time > dtime)
-                       q->vars.burst_time -= dtime;
+       if (vars->burst_time > 0) {
+               if (vars->burst_time > dtime)
+                       vars->burst_time -= dtime;
                else
-                       q->vars.burst_time = 0;
+                       vars->burst_time = 0;
        }
 }
+EXPORT_SYMBOL_GPL(pie_process_dequeue);
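The avg_dq_rate update above is a fixed-point EWMA with weight 1/8: new_avg = old_avg - old_avg/8 + count/8. A toy user-space check of that recurrence (illustrative; the sample values are arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t avg = 0;
	uint32_t samples[] = { 800, 800, 1600, 1600 };	/* arbitrary rate samples */

	for (int i = 0; i < 4; i++) {
		uint32_t count = samples[i];

		if (avg == 0)
			avg = count;	/* first sample seeds the average */
		else
			avg = (avg - (avg >> 3)) + (count >> 3);
		printf("count=%u avg=%u\n", count, avg);
	}
	return 0;
}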
 
-static void calculate_probability(struct Qdisc *sch)
+void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
+                              u32 qlen)
 {
-       struct pie_sched_data *q = qdisc_priv(sch);
-       u32 qlen = sch->qstats.backlog; /* queue size in bytes */
        psched_time_t qdelay = 0;       /* in pschedtime */
        psched_time_t qdelay_old = 0;   /* in pschedtime */
        s64 delta = 0;          /* determines the change in probability */
@@ -400,21 +317,21 @@ static void calculate_probability(struct Qdisc *sch)
        u32 power;
        bool update_prob = true;
 
-       if (q->params.dq_rate_estimator) {
-               qdelay_old = q->vars.qdelay;
-               q->vars.qdelay_old = q->vars.qdelay;
+       if (params->dq_rate_estimator) {
+               qdelay_old = vars->qdelay;
+               vars->qdelay_old = vars->qdelay;
 
-               if (q->vars.avg_dq_rate > 0)
-                       qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
+               if (vars->avg_dq_rate > 0)
+                       qdelay = (qlen << PIE_SCALE) / vars->avg_dq_rate;
                else
                        qdelay = 0;
        } else {
-               qdelay = q->vars.qdelay;
-               qdelay_old = q->vars.qdelay_old;
+               qdelay = vars->qdelay;
+               qdelay_old = vars->qdelay_old;
        }
 
-       /* If qdelay is zero and qlen is not, it means qlen is very small, less
-        * than dequeue_rate, so we do not update probabilty in this round
+       /* If qdelay is zero and qlen is not, it means qlen is very small,
+        * so we do not update probability in this round.
         */
        if (qdelay == 0 && qlen != 0)
                update_prob = false;
@@ -426,18 +343,18 @@ static void calculate_probability(struct Qdisc *sch)
         * probability. alpha/beta are updated locally below by scaling down
         * by 16 to come to 0-2 range.
         */
-       alpha = ((u64)q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
-       beta = ((u64)q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
+       alpha = ((u64)params->alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
+       beta = ((u64)params->beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
 
        /* We scale alpha and beta differently depending on how heavy the
         * congestion is. Please see RFC 8033 for details.
         */
-       if (q->vars.prob < MAX_PROB / 10) {
+       if (vars->prob < MAX_PROB / 10) {
                alpha >>= 1;
                beta >>= 1;
 
                power = 100;
-               while (q->vars.prob < div_u64(MAX_PROB, power) &&
+               while (vars->prob < div_u64(MAX_PROB, power) &&
                       power <= 1000000) {
                        alpha >>= 2;
                        beta >>= 2;
@@ -446,14 +363,14 @@ static void calculate_probability(struct Qdisc *sch)
        }
 
        /* alpha and beta should be between 0 and 32, in multiples of 1/16 */
-       delta += alpha * (u64)(qdelay - q->params.target);
+       delta += alpha * (u64)(qdelay - params->target);
        delta += beta * (u64)(qdelay - qdelay_old);
 
-       oldprob = q->vars.prob;
+       oldprob = vars->prob;
 
        /* to ensure we increase probability in steps of no more than 2% */
        if (delta > (s64)(MAX_PROB / (100 / 2)) &&
-           q->vars.prob >= MAX_PROB / 10)
+           vars->prob >= MAX_PROB / 10)
                delta = (MAX_PROB / 100) * 2;
 
        /* Non-linear drop:
@@ -464,12 +381,12 @@ static void calculate_probability(struct Qdisc *sch)
        if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
                delta += MAX_PROB / (100 / 2);
 
-       q->vars.prob += delta;
+       vars->prob += delta;
 
        if (delta > 0) {
                /* prevent overflow */
-               if (q->vars.prob < oldprob) {
-                       q->vars.prob = MAX_PROB;
+               if (vars->prob < oldprob) {
+                       vars->prob = MAX_PROB;
                        /* Prevent normalization error. If probability is at
                         * maximum value already, we normalize it here, and
                         * skip the check to do a non-linear drop in the next
@@ -479,8 +396,8 @@ static void calculate_probability(struct Qdisc *sch)
                }
        } else {
                /* prevent underflow */
-               if (q->vars.prob > oldprob)
-                       q->vars.prob = 0;
+               if (vars->prob > oldprob)
+                       vars->prob = 0;
        }
 
        /* Non-linear drop in probability: Reduce drop probability quickly if
@@ -489,10 +406,10 @@ static void calculate_probability(struct Qdisc *sch)
 
        if (qdelay == 0 && qdelay_old == 0 && update_prob)
                /* Reduce drop probability to 98.4% of its value */
-               q->vars.prob -= q->vars.prob / 64u;
+               vars->prob -= vars->prob / 64;
 
-       q->vars.qdelay = qdelay;
-       q->vars.qlen_old = qlen;
+       vars->qdelay = qdelay;
+       vars->qlen_old = qlen;
 
        /* We restart the measurement cycle if the following conditions are met
         * 1. If the delay has been low for 2 consecutive Tupdate periods
@@ -500,16 +417,17 @@ static void calculate_probability(struct Qdisc *sch)
         * 3. If average dq_rate_estimator is enabled, we have at least one
         *    estimate for the avg_dq_rate, i.e., it is a non-zero value
         */
-       if ((q->vars.qdelay < q->params.target / 2) &&
-           (q->vars.qdelay_old < q->params.target / 2) &&
-           q->vars.prob == 0 &&
-           (!q->params.dq_rate_estimator || q->vars.avg_dq_rate > 0)) {
-               pie_vars_init(&q->vars);
+       if ((vars->qdelay < params->target / 2) &&
+           (vars->qdelay_old < params->target / 2) &&
+           vars->prob == 0 &&
+           (!params->dq_rate_estimator || vars->avg_dq_rate > 0)) {
+               pie_vars_init(vars);
        }
 
-       if (!q->params.dq_rate_estimator)
-               q->vars.qdelay_old = qdelay;
+       if (!params->dq_rate_estimator)
+               vars->qdelay_old = qdelay;
 }
+EXPORT_SYMBOL_GPL(pie_calculate_probability);
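pie_calculate_probability() is still the RFC 8033 PI controller; stripped of scaling, the core step is prob += (alpha/16) * (qdelay - target) + (beta/16) * (qdelay - qdelay_old), with delays in seconds and the defaults alpha = 2, beta = 20 from pie_params_init(). A toy floating-point rendition of just that step (the kernel works in u64 fixed point scaled to MAX_PROB and additionally shrinks alpha/beta when prob is small; both are omitted here):

#include <stdio.h>

int main(void)
{
	double alpha = 2.0 / 16, beta = 20.0 / 16;	/* defaults, in 1/16 units */
	double target = 0.015;				/* 15 ms */
	double prob = 0.0, qdelay_old = 0.0;
	double qdelays[] = { 0.030, 0.025, 0.020, 0.015 };

	for (int i = 0; i < 4; i++) {
		double qdelay = qdelays[i];

		prob += alpha * (qdelay - target) + beta * (qdelay - qdelay_old);
		if (prob < 0.0)
			prob = 0.0;	/* the kernel clamps at 0 and MAX_PROB */
		if (prob > 1.0)
			prob = 1.0;
		printf("qdelay=%.3fs prob=%.4f\n", qdelay, prob);
		qdelay_old = qdelay;
	}
	return 0;
}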
 
 static void pie_timer(struct timer_list *t)
 {
@@ -518,7 +436,7 @@ static void pie_timer(struct timer_list *t)
        spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 
        spin_lock(root_lock);
-       calculate_probability(sch);
+       pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);
 
        /* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
        if (q->params.tupdate)
@@ -607,12 +525,13 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 
 static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
 {
+       struct pie_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb = qdisc_dequeue_head(sch);
 
        if (!skb)
                return NULL;
 
-       pie_process_dequeue(sch, skb);
+       pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog);
        return skb;
 }
 
@@ -633,7 +552,7 @@ static void pie_destroy(struct Qdisc *sch)
 }
 
 static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
-       .id = "pie",
+       .id             = "pie",
        .priv_size      = sizeof(struct pie_sched_data),
        .enqueue        = pie_qdisc_enqueue,
        .dequeue        = pie_qdisc_dequeue,