// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/list.h>
14 #include <linux/errno.h>
15 #include <linux/skbuff.h>
16 #include <linux/math64.h>
17 #include <linux/module.h>
18 #include <linux/spinlock.h>
19 #include <linux/rcupdate.h>
20 #include <net/netlink.h>
21 #include <net/pkt_sched.h>
22 #include <net/pkt_cls.h>
23 #include <net/sch_generic.h>
27 static LIST_HEAD(taprio_list);
28 static DEFINE_SPINLOCK(taprio_list_lock);
30 #define TAPRIO_ALL_GATES_OPEN -1
32 #define FLAGS_VALID(flags) (!((flags) & ~TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST))
33 #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open; the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
65 enum tk_offsets tk_offset;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds the time per byte is below one
				    * nanosecond.
				    */
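	/* Illustrative arithmetic, not part of the original source: at
	 * 1 Gbps, picos_per_byte = (10^12 ps/s * 8 bit/byte) / 10^9 bit/s
	 * = 8000 ps (8 ns) per byte; at 10 Gbps it is 800 ps, i.e. less
	 * than a nanosecond per byte.
	 */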
71 /* Protects the update side of the RCU protected current_entry */
72 spinlock_t current_entry_lock;
73 struct sched_entry __rcu *current_entry;
74 struct sched_gate_list __rcu *oper_sched;
75 struct sched_gate_list __rcu *admin_sched;
76 struct hrtimer advance_timer;
77 struct list_head taprio_list;
81 static ktime_t sched_base_time(const struct sched_gate_list *sched)
86 return ns_to_ktime(sched->base_time);
89 static ktime_t taprio_get_time(struct taprio_sched *q)
91 ktime_t mono = ktime_get();
93 switch (q->tk_offset) {
97 return ktime_mono_to_any(mono, q->tk_offset);
103 static void taprio_free_sched_cb(struct rcu_head *head)
105 struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
106 struct sched_entry *entry, *n;
111 list_for_each_entry_safe(entry, n, &sched->entries, list) {
112 list_del(&entry->list);
119 static void switch_schedules(struct taprio_sched *q,
120 struct sched_gate_list **admin,
121 struct sched_gate_list **oper)
123 rcu_assign_pointer(q->oper_sched, *admin);
124 rcu_assign_pointer(q->admin_sched, NULL);
127 call_rcu(&(*oper)->rcu, taprio_free_sched_cb);
/* Get how much time has already elapsed in the current cycle. */
134 static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
136 ktime_t time_since_sched_start;
139 time_since_sched_start = ktime_sub(time, sched->base_time);
140 div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);
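/* Worked example (illustrative numbers, not from the original source):
 * with base_time = 0 and cycle_time = 1 ms, a timestamp of 2.3 ms gives
 * time_since_sched_start = 2,300,000 ns and a remainder of 300,000 ns,
 * i.e. 300 us have already elapsed in the current cycle.
 */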
145 static ktime_t get_interval_end_time(struct sched_gate_list *sched,
146 struct sched_gate_list *admin,
147 struct sched_entry *entry,
150 s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
151 ktime_t intv_end, cycle_ext_end, cycle_end;
153 cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
154 intv_end = ktime_add_ns(intv_start, entry->interval);
155 cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);
157 if (ktime_before(intv_end, cycle_end))
159 else if (admin && admin != sched &&
160 ktime_after(admin->base_time, cycle_end) &&
161 ktime_before(admin->base_time, cycle_ext_end))
162 return admin->base_time;
167 static int length_to_duration(struct taprio_sched *q, int len)
169 return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
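/* Example (illustrative): with picos_per_byte = 8000 (1 Gbps), a
 * 1500-byte frame takes 1500 * 8000 / 1000 = 12000 ns, i.e. roughly
 * 12 us of wire time.
 */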
/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
176 static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
178 struct sched_gate_list *sched,
179 struct sched_gate_list *admin,
181 ktime_t *interval_start,
182 ktime_t *interval_end,
183 bool validate_interval)
185 ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
186 ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
187 struct sched_entry *entry = NULL, *entry_found = NULL;
188 struct taprio_sched *q = qdisc_priv(sch);
189 struct net_device *dev = qdisc_dev(sch);
190 bool entry_available = false;
194 tc = netdev_get_prio_tc_map(dev, skb->priority);
195 packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));
203 cycle = sched->cycle_time;
204 cycle_elapsed = get_cycle_time_elapsed(sched, time);
205 curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
206 cycle_end = ktime_add_ns(curr_intv_end, cycle);
208 list_for_each_entry(entry, &sched->entries, list) {
209 curr_intv_start = curr_intv_end;
210 curr_intv_end = get_interval_end_time(sched, admin, entry,
213 if (ktime_after(curr_intv_start, cycle_end))
216 if (!(entry->gate_mask & BIT(tc)) ||
217 packet_transmit_time > entry->interval)
220 txtime = entry->next_txtime;
222 if (ktime_before(txtime, time) || validate_interval) {
223 transmit_end_time = ktime_add_ns(time, packet_transmit_time);
224 if ((ktime_before(curr_intv_start, time) &&
225 ktime_before(transmit_end_time, curr_intv_end)) ||
226 (ktime_after(curr_intv_start, time) && !validate_interval)) {
228 *interval_start = curr_intv_start;
229 *interval_end = curr_intv_end;
231 } else if (!entry_available && !validate_interval) {
			/* Here, we are just trying to find out the
			 * first available interval in the next cycle.
			 */
237 *interval_start = ktime_add_ns(curr_intv_start, cycle);
238 *interval_end = ktime_add_ns(curr_intv_end, cycle);
240 } else if (ktime_before(txtime, earliest_txtime) &&
242 earliest_txtime = txtime;
244 n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
245 *interval_start = ktime_add(curr_intv_start, n * cycle);
246 *interval_end = ktime_add(curr_intv_end, n * cycle);
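		/* Example (illustrative): if cycle = 1 ms,
		 * curr_intv_start = 0.2 ms and the entry's next txtime is
		 * 3.4 ms, then n = 3 and the reported interval is the one
		 * starting at 0.2 ms + 3 * 1 ms = 3.2 ms.
		 */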
253 static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
255 struct taprio_sched *q = qdisc_priv(sch);
256 struct sched_gate_list *sched, *admin;
257 ktime_t interval_start, interval_end;
258 struct sched_entry *entry;
261 sched = rcu_dereference(q->oper_sched);
262 admin = rcu_dereference(q->admin_sched);
264 entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
265 &interval_start, &interval_end, true);
271 /* This returns the tstamp value set by TCP in terms of the set clock. */
272 static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
274 unsigned int offset = skb_network_offset(skb);
275 const struct ipv6hdr *ipv6h;
276 const struct iphdr *iph;
277 struct ipv6hdr _ipv6h;
279 ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
283 if (ipv6h->version == 4) {
284 iph = (struct iphdr *)ipv6h;
285 offset += iph->ihl * 4;
		/* Special-case 6in4 tunnelling, as that is a common way to
		 * get v6 connectivity in the home.
		 */
290 if (iph->protocol == IPPROTO_IPV6) {
291 ipv6h = skb_header_pointer(skb, offset,
292 sizeof(_ipv6h), &_ipv6h);
294 if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
296 } else if (iph->protocol != IPPROTO_TCP) {
299 } else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
303 return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past:
 *    a. The gate for the traffic class is currently open and the packet
 *       can be transmitted before it closes: schedule the packet right
 *       away.
 *    b. If the gate corresponding to the traffic class is going to open
 *       later in the cycle: set the txtime of the packet to the interval
 *       start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the
 *       txtime closes.
 *    b. The window might close before the transmission can be completed
 *       successfully: in that case, schedule the packet in the next open
 *       window.
 */
321 static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
323 ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
324 struct taprio_sched *q = qdisc_priv(sch);
325 struct sched_gate_list *sched, *admin;
326 ktime_t minimum_time, now, txtime;
327 int len, packet_transmit_time;
328 struct sched_entry *entry;
331 now = taprio_get_time(q);
332 minimum_time = ktime_add_ns(now, q->txtime_delay);
334 tcp_tstamp = get_tcp_tstamp(q, skb);
335 minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);
338 admin = rcu_dereference(q->admin_sched);
339 sched = rcu_dereference(q->oper_sched);
340 if (admin && ktime_after(minimum_time, admin->base_time))
341 switch_schedules(q, &admin, &sched);
343 /* Until the schedule starts, all the queues are open */
344 if (!sched || ktime_before(minimum_time, sched->base_time)) {
345 txtime = minimum_time;
349 len = qdisc_pkt_len(skb);
350 packet_transmit_time = length_to_duration(q, len);
355 entry = find_entry_to_transmit(skb, sch, sched, admin,
357 &interval_start, &interval_end,
364 txtime = entry->next_txtime;
365 txtime = max_t(ktime_t, txtime, minimum_time);
366 txtime = max_t(ktime_t, txtime, interval_start);
368 if (admin && admin != sched &&
369 ktime_after(txtime, admin->base_time)) {
375 transmit_end_time = ktime_add(txtime, packet_transmit_time);
376 minimum_time = transmit_end_time;
		/* Update the txtime of the current entry to the next time
		 * its interval starts.
		 */
381 if (ktime_after(transmit_end_time, interval_end))
382 entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
383 } while (sched_changed || ktime_after(transmit_end_time, interval_end));
385 entry->next_txtime = transmit_end_time;
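	/* Worked example (illustrative, all numbers assumed): with
	 * txtime_delay = 500 us, now = 10 ms and a TCP timestamp of
	 * 10.2 ms, minimum_time = 10.5 ms. If the traffic class's next
	 * interval starts at 10.8 ms and the packet fits before that
	 * interval ends, the returned txtime is 10.8 ms and the entry's
	 * next_txtime is advanced by the packet's transmission time.
	 */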
392 static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
393 struct sk_buff **to_free)
395 struct taprio_sched *q = qdisc_priv(sch);
399 queue = skb_get_queue_mapping(skb);
401 child = q->qdiscs[queue];
402 if (unlikely(!child))
403 return qdisc_drop(skb, sch, to_free);
405 if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
406 if (!is_valid_interval(skb, sch))
407 return qdisc_drop(skb, sch, to_free);
408 } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
409 skb->tstamp = get_packet_txtime(skb, sch);
411 return qdisc_drop(skb, sch, to_free);
414 qdisc_qstats_backlog_inc(sch, skb);
417 return qdisc_enqueue(skb, child, to_free);
420 static struct sk_buff *taprio_peek(struct Qdisc *sch)
422 struct taprio_sched *q = qdisc_priv(sch);
423 struct net_device *dev = qdisc_dev(sch);
424 struct sched_entry *entry;
430 entry = rcu_dereference(q->current_entry);
431 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
437 for (i = 0; i < dev->num_tx_queues; i++) {
438 struct Qdisc *child = q->qdiscs[i];
442 if (unlikely(!child))
445 skb = child->ops->peek(child);
449 if (TXTIME_ASSIST_IS_ENABLED(q->flags))
452 prio = skb->priority;
453 tc = netdev_get_prio_tc_map(dev, prio);
455 if (!(gate_mask & BIT(tc)))
464 static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
466 atomic_set(&entry->budget,
467 div64_u64((u64)entry->interval * 1000,
468 atomic64_read(&q->picos_per_byte)));
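/* Example (illustrative): an entry with interval = 100000 ns at 1 Gbps
 * (picos_per_byte = 8000) gets a budget of 100000 * 1000 / 8000 =
 * 12500 bytes, i.e. the number of bytes that fit in that interval.
 */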
471 static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
473 struct taprio_sched *q = qdisc_priv(sch);
474 struct net_device *dev = qdisc_dev(sch);
475 struct sk_buff *skb = NULL;
476 struct sched_entry *entry;
481 entry = rcu_dereference(q->current_entry);
	/* If there's no entry, it means that the schedule didn't start yet,
	 * so force all gates to be open; this is in accordance with
	 * IEEE 802.1Qbv-2015 Section 8.6.9.4.5.
	 */
487 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
492 for (i = 0; i < dev->num_tx_queues; i++) {
493 struct Qdisc *child = q->qdiscs[i];
499 if (unlikely(!child))
502 if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
503 skb = child->ops->dequeue(child);
509 skb = child->ops->peek(child);
513 prio = skb->priority;
514 tc = netdev_get_prio_tc_map(dev, prio);
516 if (!(gate_mask & BIT(tc)))
519 len = qdisc_pkt_len(skb);
520 guard = ktime_add_ns(taprio_get_time(q),
521 length_to_duration(q, len));
		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
526 if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
527 ktime_after(guard, entry->close_time))
530 /* ... and no budget. */
531 if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
532 atomic_sub_return(len, &entry->budget) < 0)
535 skb = child->ops->dequeue(child);
540 qdisc_bstats_update(sch, skb);
541 qdisc_qstats_backlog_dec(sch, skb);
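		/* Example of the guard and budget checks above
		 * (illustrative): at 1 Gbps a 1500-byte frame needs about
		 * 12 us on the wire, so if fewer than 12 us remain before
		 * the entry's close_time, or fewer than 1500 bytes of
		 * budget remain, the frame is left queued until the next
		 * interval instead of overrunning the gate.
		 */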
553 static bool should_restart_cycle(const struct sched_gate_list *oper,
554 const struct sched_entry *entry)
556 if (list_is_last(&entry->list, &oper->entries))
559 if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
565 static bool should_change_schedules(const struct sched_gate_list *admin,
566 const struct sched_gate_list *oper,
569 ktime_t next_base_time, extension_time;
574 next_base_time = sched_base_time(admin);
	/* This is the simple case: the close_time would fall after
	 * the next schedule's base_time.
	 */
579 if (ktime_compare(next_base_time, close_time) <= 0)
	/* This is the cycle_time_extension case: if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule's base_time, we can extend the current schedule
	 * for that amount.
	 */
587 extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);
	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
593 if (ktime_compare(next_base_time, extension_time) <= 0)
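	/* Example (illustrative): if the current entry closes at 10 ms, the
	 * admin schedule's base_time is 10.4 ms and cycle_time_extension is
	 * 500 us, then extension_time = 10.5 ms >= 10.4 ms, so the
	 * comparison above holds and advance_sched() will use the admin
	 * base_time as the next close_time.
	 */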
599 static enum hrtimer_restart advance_sched(struct hrtimer *timer)
601 struct taprio_sched *q = container_of(timer, struct taprio_sched,
603 struct sched_gate_list *oper, *admin;
604 struct sched_entry *entry, *next;
605 struct Qdisc *sch = q->root;
608 spin_lock(&q->current_entry_lock);
609 entry = rcu_dereference_protected(q->current_entry,
610 lockdep_is_held(&q->current_entry_lock));
611 oper = rcu_dereference_protected(q->oper_sched,
612 lockdep_is_held(&q->current_entry_lock));
613 admin = rcu_dereference_protected(q->admin_sched,
614 lockdep_is_held(&q->current_entry_lock));
617 switch_schedules(q, &admin, &oper);
	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. the previous schedule just ended. The first
	 * entry of every schedule is pre-calculated during the
	 * schedule initialization.
	 */
625 if (unlikely(!entry || entry->close_time == oper->base_time)) {
626 next = list_first_entry(&oper->entries, struct sched_entry,
628 close_time = next->close_time;
632 if (should_restart_cycle(oper, entry)) {
633 next = list_first_entry(&oper->entries, struct sched_entry,
635 oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
638 next = list_next_entry(entry, list);
641 close_time = ktime_add_ns(entry->close_time, next->interval);
642 close_time = min_t(ktime_t, close_time, oper->cycle_close_time);
644 if (should_change_schedules(admin, oper, close_time)) {
		/* Set things up so that the next time this runs, the new
		 * schedule takes effect.
		 */
648 close_time = sched_base_time(admin);
649 switch_schedules(q, &admin, &oper);
652 next->close_time = close_time;
653 taprio_set_budget(q, next);
656 rcu_assign_pointer(q->current_entry, next);
657 spin_unlock(&q->current_entry_lock);
659 hrtimer_set_expires(&q->advance_timer, close_time);
662 __netif_schedule(sch);
665 return HRTIMER_RESTART;
668 static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
669 [TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 },
670 [TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 },
671 [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
672 [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 },
675 static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = {
676 [TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED },
679 static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
680 [TCA_TAPRIO_ATTR_PRIOMAP] = {
681 .len = sizeof(struct tc_mqprio_qopt)
683 [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED },
684 [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 },
685 [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
686 [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
687 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
688 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
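/* For reference, a schedule like the ones parsed below is typically
 * installed with iproute2; an example along the lines of the
 * tc-taprio(8) man page, with illustrative values:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *       num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 1@0 1@1 2@2 \
 *       base-time 1000000000 \
 *       sched-entry S 01 300000 \
 *       sched-entry S 02 300000 \
 *       sched-entry S 04 400000 \
 *       clockid CLOCK_TAI
 */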
691 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
692 struct netlink_ext_ack *extack)
696 if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
697 entry->command = nla_get_u8(
698 tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);
700 if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
701 entry->gate_mask = nla_get_u32(
702 tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);
704 if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
705 interval = nla_get_u32(
706 tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
709 NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
713 entry->interval = interval;
718 static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
719 int index, struct netlink_ext_ack *extack)
721 struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
724 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
727 NL_SET_ERR_MSG(extack, "Could not parse nested entry");
731 entry->index = index;
733 return fill_sched_entry(tb, entry, extack);
736 static int parse_sched_list(struct nlattr *list,
737 struct sched_gate_list *sched,
738 struct netlink_ext_ack *extack)
747 nla_for_each_nested(n, list, rem) {
748 struct sched_entry *entry;
750 if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
751 NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
755 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
757 NL_SET_ERR_MSG(extack, "Not enough memory for entry");
761 err = parse_sched_entry(n, entry, i, extack);
767 list_add_tail(&entry->list, &sched->entries);
771 sched->num_entries = i;
776 static int parse_taprio_schedule(struct nlattr **tb,
777 struct sched_gate_list *new,
778 struct netlink_ext_ack *extack)
782 if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
783 NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
787 if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
788 new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
790 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
791 new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);
793 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
794 new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
796 if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
797 err = parse_sched_list(
798 tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
802 if (!new->cycle_time) {
803 struct sched_entry *entry;
806 list_for_each_entry(entry, &new->entries, list)
807 cycle = ktime_add_ns(cycle, entry->interval);
808 new->cycle_time = cycle;
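		/* Example (illustrative): with entries of 300 us, 300 us and
		 * 400 us and no explicit cycle-time attribute, the computed
		 * cycle_time is 300000 + 300000 + 400000 = 1000000 ns, i.e.
		 * a 1 ms cycle.
		 */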
814 static int taprio_parse_mqprio_opt(struct net_device *dev,
815 struct tc_mqprio_qopt *qopt,
816 struct netlink_ext_ack *extack,
821 if (!qopt && !dev->num_tc) {
822 NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part.
	 */
832 /* Verify num_tc is not out of max range */
833 if (qopt->num_tc > TC_MAX_QUEUE) {
834 NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
838 /* taprio imposes that traffic classes map 1:n to tx queues */
839 if (qopt->num_tc > dev->num_tx_queues) {
840 NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
844 /* Verify priority mapping uses valid tcs */
845 for (i = 0; i < TC_BITMASK + 1; i++) {
846 if (qopt->prio_tc_map[i] >= qopt->num_tc) {
847 NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
852 for (i = 0; i < qopt->num_tc; i++) {
853 unsigned int last = qopt->offset[i] + qopt->count[i];
		/* Verify the queue count is within the tx range; a count
		 * equal to real_num_tx_queues indicates the last queue is
		 * in use.
		 */
858 if (qopt->offset[i] >= dev->num_tx_queues ||
860 last > dev->real_num_tx_queues) {
861 NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
865 if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
868 /* Verify that the offset and counts do not overlap */
869 for (j = i + 1; j < qopt->num_tc; j++) {
870 if (last > qopt->offset[j]) {
871 NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
880 static int taprio_get_start_time(struct Qdisc *sch,
881 struct sched_gate_list *sched,
884 struct taprio_sched *q = qdisc_priv(sch);
885 ktime_t now, base, cycle;
888 base = sched_base_time(sched);
889 now = taprio_get_time(q);
891 if (ktime_after(base, now)) {
896 cycle = sched->cycle_time;
	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return an error.
	 */
	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
909 n = div64_s64(ktime_sub_ns(now, base), cycle);
910 *start = ktime_add_ns(base, (n + 1) * cycle);
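	/* Example (illustrative): base = 0, cycle = 1 ms and now = 2.3 ms
	 * give n = 2, so the schedule is started at the beginning of the
	 * next full cycle, *start = 3 ms.
	 */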
914 static void setup_first_close_time(struct taprio_sched *q,
915 struct sched_gate_list *sched, ktime_t base)
917 struct sched_entry *first;
920 first = list_first_entry(&sched->entries,
921 struct sched_entry, list);
923 cycle = sched->cycle_time;
925 /* FIXME: find a better place to do this */
926 sched->cycle_close_time = ktime_add_ns(base, cycle);
928 first->close_time = ktime_add_ns(base, first->interval);
929 taprio_set_budget(q, first);
930 rcu_assign_pointer(q->current_entry, NULL);
933 static void taprio_start_sched(struct Qdisc *sch,
934 ktime_t start, struct sched_gate_list *new)
936 struct taprio_sched *q = qdisc_priv(sch);
939 expires = hrtimer_get_expires(&q->advance_timer);
	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
947 start = min_t(ktime_t, start, expires);
949 hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
952 static void taprio_set_picos_per_byte(struct net_device *dev,
953 struct taprio_sched *q)
955 struct ethtool_link_ksettings ecmd;
956 int speed = SPEED_10;
960 err = __ethtool_get_link_ksettings(dev, &ecmd);
964 if (ecmd.base.speed != SPEED_UNKNOWN)
965 speed = ecmd.base.speed;
968 picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
969 speed * 1000 * 1000);
971 atomic64_set(&q->picos_per_byte, picos_per_byte);
972 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
973 dev->name, (long long)atomic64_read(&q->picos_per_byte),
977 static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
980 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
981 struct net_device *qdev;
982 struct taprio_sched *q;
987 if (event != NETDEV_UP && event != NETDEV_CHANGE)
990 spin_lock(&taprio_list_lock);
991 list_for_each_entry(q, &taprio_list, taprio_list) {
992 qdev = qdisc_dev(q->root);
998 spin_unlock(&taprio_list_lock);
1001 taprio_set_picos_per_byte(dev, q);
1006 static void setup_txtime(struct taprio_sched *q,
1007 struct sched_gate_list *sched, ktime_t base)
1009 struct sched_entry *entry;
1012 list_for_each_entry(entry, &sched->entries, list) {
1013 entry->next_txtime = ktime_add_ns(base, interval);
1014 interval += entry->interval;
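	/* Example (illustrative): with base = 0 and entries of 300 us,
	 * 300 us and 400 us, next_txtime is initialized to 0, 300 us and
	 * 600 us respectively, i.e. each entry's first transmission
	 * opportunity within the first cycle.
	 */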
1018 static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
1019 struct netlink_ext_ack *extack)
1021 struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
1022 struct sched_gate_list *oper, *admin, *new_admin;
1023 struct taprio_sched *q = qdisc_priv(sch);
1024 struct net_device *dev = qdisc_dev(sch);
1025 struct tc_mqprio_qopt *mqprio = NULL;
1026 u32 taprio_flags = 0;
1027 int i, err, clockid;
1028 unsigned long flags;
1031 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
1032 taprio_policy, extack);
1036 if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
1037 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
1039 if (tb[TCA_TAPRIO_ATTR_FLAGS]) {
1040 taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]);
1042 if (q->flags != 0 && q->flags != taprio_flags) {
1043 NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
1045 } else if (!FLAGS_VALID(taprio_flags)) {
1046 NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
1050 q->flags = taprio_flags;
1053 err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags);
1057 new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
1059 NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
1062 INIT_LIST_HEAD(&new_admin->entries);
1065 oper = rcu_dereference(q->oper_sched);
1066 admin = rcu_dereference(q->admin_sched);
1069 if (mqprio && (oper || admin)) {
1070 NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
1075 err = parse_taprio_schedule(tb, new_admin, extack);
1079 if (new_admin->num_entries == 0) {
1080 NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
1085 if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
1086 clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
		/* We only support static clockids, and we don't allow
		 * the clockid to be modified after the first init.
		 */
1092 (q->clockid != -1 && q->clockid != clockid)) {
1093 NL_SET_ERR_MSG(extack, "Changing the 'clockid' of a running schedule is not supported");
1098 q->clockid = clockid;
1101 if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
1102 NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
1107 taprio_set_picos_per_byte(dev, q);
1109 /* Protects against enqueue()/dequeue() */
1110 spin_lock_bh(qdisc_lock(sch));
1112 if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
1113 if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
1114 NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
1119 q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
1122 if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) &&
1123 !hrtimer_active(&q->advance_timer)) {
1124 hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
1125 q->advance_timer.function = advance_sched;
1129 netdev_set_num_tc(dev, mqprio->num_tc);
1130 for (i = 0; i < mqprio->num_tc; i++)
1131 netdev_set_tc_queue(dev, i,
1135 /* Always use supplied priority mappings */
1136 for (i = 0; i < TC_BITMASK + 1; i++)
1137 netdev_set_prio_tc_map(dev, i,
1138 mqprio->prio_tc_map[i]);
1141 switch (q->clockid) {
1142 case CLOCK_REALTIME:
1143 q->tk_offset = TK_OFFS_REAL;
1145 case CLOCK_MONOTONIC:
1146 q->tk_offset = TK_OFFS_MAX;
1148 case CLOCK_BOOTTIME:
1149 q->tk_offset = TK_OFFS_BOOT;
1152 q->tk_offset = TK_OFFS_TAI;
1155 NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
1160 err = taprio_get_start_time(sch, new_admin, &start);
1162 NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
1166 if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) {
1167 setup_txtime(q, new_admin, start);
1170 rcu_assign_pointer(q->oper_sched, new_admin);
1176 rcu_assign_pointer(q->admin_sched, new_admin);
1178 call_rcu(&admin->rcu, taprio_free_sched_cb);
1180 setup_first_close_time(q, new_admin, start);
1182 /* Protects against advance_sched() */
1183 spin_lock_irqsave(&q->current_entry_lock, flags);
1185 taprio_start_sched(sch, start, new_admin);
1187 rcu_assign_pointer(q->admin_sched, new_admin);
1189 call_rcu(&admin->rcu, taprio_free_sched_cb);
1191 spin_unlock_irqrestore(&q->current_entry_lock, flags);
1198 spin_unlock_bh(qdisc_lock(sch));
1202 call_rcu(&new_admin->rcu, taprio_free_sched_cb);
1207 static void taprio_destroy(struct Qdisc *sch)
1209 struct taprio_sched *q = qdisc_priv(sch);
1210 struct net_device *dev = qdisc_dev(sch);
1213 spin_lock(&taprio_list_lock);
1214 list_del(&q->taprio_list);
1215 spin_unlock(&taprio_list_lock);
1217 hrtimer_cancel(&q->advance_timer);
1220 for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
1221 qdisc_put(q->qdiscs[i]);
1227 netdev_set_num_tc(dev, 0);
1230 call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);
1233 call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
1236 static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
1237 struct netlink_ext_ack *extack)
1239 struct taprio_sched *q = qdisc_priv(sch);
1240 struct net_device *dev = qdisc_dev(sch);
1243 spin_lock_init(&q->current_entry_lock);
1245 hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
1246 q->advance_timer.function = advance_sched;
	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;
1255 spin_lock(&taprio_list_lock);
1256 list_add(&q->taprio_list, &taprio_list);
1257 spin_unlock(&taprio_list_lock);
1259 if (sch->parent != TC_H_ROOT)
1262 if (!netif_is_multiqueue(dev))
1265 /* pre-allocate qdisc, attachment can't fail */
1266 q->qdiscs = kcalloc(dev->num_tx_queues,
1267 sizeof(q->qdiscs[0]),
1276 for (i = 0; i < dev->num_tx_queues; i++) {
1277 struct netdev_queue *dev_queue;
1278 struct Qdisc *qdisc;
1280 dev_queue = netdev_get_tx_queue(dev, i);
1281 qdisc = qdisc_create_dflt(dev_queue,
1283 TC_H_MAKE(TC_H_MAJ(sch->handle),
1289 if (i < dev->real_num_tx_queues)
1290 qdisc_hash_add(qdisc, false);
1292 q->qdiscs[i] = qdisc;
1295 return taprio_change(sch, opt, extack);
1298 static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
1301 struct net_device *dev = qdisc_dev(sch);
1302 unsigned long ntx = cl - 1;
1304 if (ntx >= dev->num_tx_queues)
1307 return netdev_get_tx_queue(dev, ntx);
1310 static int taprio_graft(struct Qdisc *sch, unsigned long cl,
1311 struct Qdisc *new, struct Qdisc **old,
1312 struct netlink_ext_ack *extack)
1314 struct taprio_sched *q = qdisc_priv(sch);
1315 struct net_device *dev = qdisc_dev(sch);
1316 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1321 if (dev->flags & IFF_UP)
1322 dev_deactivate(dev);
1324 *old = q->qdiscs[cl - 1];
1325 q->qdiscs[cl - 1] = new;
1328 new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1330 if (dev->flags & IFF_UP)
1336 static int dump_entry(struct sk_buff *msg,
1337 const struct sched_entry *entry)
1339 struct nlattr *item;
1341 item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
1345 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
1346 goto nla_put_failure;
1348 if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
1349 goto nla_put_failure;
1351 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
1353 goto nla_put_failure;
1355 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
1357 goto nla_put_failure;
1359 return nla_nest_end(msg, item);
1362 nla_nest_cancel(msg, item);
1366 static int dump_schedule(struct sk_buff *msg,
1367 const struct sched_gate_list *root)
1369 struct nlattr *entry_list;
1370 struct sched_entry *entry;
1372 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
1373 root->base_time, TCA_TAPRIO_PAD))
1376 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
1377 root->cycle_time, TCA_TAPRIO_PAD))
1380 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
1381 root->cycle_time_extension, TCA_TAPRIO_PAD))
1384 entry_list = nla_nest_start_noflag(msg,
1385 TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
1389 list_for_each_entry(entry, &root->entries, list) {
1390 if (dump_entry(msg, entry) < 0)
1394 nla_nest_end(msg, entry_list);
1398 nla_nest_cancel(msg, entry_list);
1402 static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
1404 struct taprio_sched *q = qdisc_priv(sch);
1405 struct net_device *dev = qdisc_dev(sch);
1406 struct sched_gate_list *oper, *admin;
1407 struct tc_mqprio_qopt opt = { 0 };
1408 struct nlattr *nest, *sched_nest;
1412 oper = rcu_dereference(q->oper_sched);
1413 admin = rcu_dereference(q->admin_sched);
1415 opt.num_tc = netdev_get_num_tc(dev);
1416 memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
1418 for (i = 0; i < netdev_get_num_tc(dev); i++) {
1419 opt.count[i] = dev->tc_to_txq[i].count;
1420 opt.offset[i] = dev->tc_to_txq[i].offset;
1423 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1427 if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
1430 if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
1433 if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
1436 if (q->txtime_delay &&
1437 nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
1440 if (oper && dump_schedule(skb, oper))
1446 sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
1450 if (dump_schedule(skb, admin))
1453 nla_nest_end(skb, sched_nest);
1458 return nla_nest_end(skb, nest);
1461 nla_nest_cancel(skb, sched_nest);
1464 nla_nest_cancel(skb, nest);
1471 static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
1473 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1478 return dev_queue->qdisc_sleeping;
1481 static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
1483 unsigned int ntx = TC_H_MIN(classid);
1485 if (!taprio_queue_get(sch, ntx))
1490 static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
1491 struct sk_buff *skb, struct tcmsg *tcm)
1493 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1495 tcm->tcm_parent = TC_H_ROOT;
1496 tcm->tcm_handle |= TC_H_MIN(cl);
1497 tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
1502 static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
1503 struct gnet_dump *d)
1507 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1509 sch = dev_queue->qdisc_sleeping;
1510 if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
1511 qdisc_qstats_copy(d, sch) < 0)
1516 static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1518 struct net_device *dev = qdisc_dev(sch);
1524 arg->count = arg->skip;
1525 for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
1526 if (arg->fn(sch, ntx + 1, arg) < 0) {
1534 static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
1537 return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
1540 static const struct Qdisc_class_ops taprio_class_ops = {
1541 .graft = taprio_graft,
1542 .leaf = taprio_leaf,
1543 .find = taprio_find,
1544 .walk = taprio_walk,
1545 .dump = taprio_dump_class,
1546 .dump_stats = taprio_dump_class_stats,
1547 .select_queue = taprio_select_queue,
1550 static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
1551 .cl_ops = &taprio_class_ops,
1553 .priv_size = sizeof(struct taprio_sched),
1554 .init = taprio_init,
1555 .change = taprio_change,
1556 .destroy = taprio_destroy,
1557 .peek = taprio_peek,
1558 .dequeue = taprio_dequeue,
1559 .enqueue = taprio_enqueue,
1560 .dump = taprio_dump,
1561 .owner = THIS_MODULE,
1564 static struct notifier_block taprio_device_notifier = {
1565 .notifier_call = taprio_dev_notifier,
1568 static int __init taprio_module_init(void)
1570 int err = register_netdevice_notifier(&taprio_device_notifier);
1575 return register_qdisc(&taprio_qdisc_ops);
1578 static void __exit taprio_module_exit(void)
1580 unregister_qdisc(&taprio_qdisc_ops);
1581 unregister_netdevice_notifier(&taprio_device_notifier);
1584 module_init(taprio_module_init);
1585 module_exit(taprio_module_exit);
1586 MODULE_LICENSE("GPL");