// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	 Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>
static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
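
/* Example of a schedule being installed from user space (illustrative
 * only; the interface name, handle and times below are hypothetical):
 *
 *	tc qdisc replace dev eth0 parent root handle 100 taprio \
 *		num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *		queues 1@0 1@1 2@2 \
 *		base-time 1528743495910289987 \
 *		sched-entry S 01 300000 \
 *		sched-entry S 02 300000 \
 *		sched-entry S 04 400000 \
 *		clockid CLOCK_TAI
 *
 * Each "sched-entry S <gate-mask> <interval>" becomes one sched_entry
 * below: the gate mask selects which traffic classes may transmit while
 * the entry is active, for <interval> nanoseconds.
 */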
struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open, the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	ktime_t next_txtime;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};
struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};
struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
	u32 txtime_delay;
};
struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};
static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}
static ktime_t taprio_get_time(struct taprio_sched *q)
{
	ktime_t mono = ktime_get();

	switch (q->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, q->tk_offset);
	}

	return KTIME_MAX;
}
static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	if (!sched)
		return;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}
static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}
/* Get how much time has been already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}
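
/* Transmission time of 'len' bytes at the current link speed, in
 * nanoseconds: picos_per_byte is picoseconds per byte, so dividing the
 * product by 1000 converts it to ns. As an example (values illustrative),
 * at 1Gb/s picos_per_byte is 8000, so a 1500 byte frame takes
 * 1500 * 8000 / 1000 = 12000ns on the wire.
 */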
static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}
/* Returns the entry corresponding to next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = 1;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}
static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}
static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}
/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
}
/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and packet can be
 *       transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = 0;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = 1;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of current entry to the next time it's
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}
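
/* Enqueue simply routes the packet to the child qdisc of its TX queue.
 * In txtime-assist mode, the launch time computed above is stamped into
 * skb->tstamp here, so that a lower layer (e.g. the etf qdisc) can
 * release the packet inside the right window.
 */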
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}
static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags))
			return skb;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}
static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		return skb;
	}

	return NULL;
}
static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->peek(sch);
}
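
/* The budget is the number of bytes that fit in this entry's interval:
 * interval is in ns, so interval * 1000 / picos_per_byte gives bytes.
 * As an example (values illustrative), a 300us entry at 1Gb/s
 * (picos_per_byte == 8000) allows 300000 * 1000 / 8000 = 37500 bytes.
 */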
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * 1000,
			     atomic64_read(&q->picos_per_byte)));
}
static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	if (!gate_mask)
		goto done;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			skb = child->ops->dequeue(child);
			if (!skb)
				continue;
			goto skb_found;
		}

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(taprio_get_time(q),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time))
			continue;

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0)
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

skb_found:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

done:
	rcu_read_unlock();

	return skb;
}
static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			continue;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		return skb;
	}

	return NULL;
}
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->dequeue(sch);
}
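
/* The current entry is the last one of the operational schedule either
 * when it is literally the last entry of the list, or when its
 * close_time was clamped to the cycle's close time (with an explicit
 * cycle_time the cycle may end in the middle of the entry list).
 */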
static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}
static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules are pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}
static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};
static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP]	       = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
};
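
/* Copy the attributes of one schedule entry out of the parsed netlink
 * table into 'entry'. An entry without a positive interval is rejected.
 */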
static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}
static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
			     int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(tb, entry, extack);
}
static int parse_sched_list(struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}
static int parse_taprio_schedule(struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(
			tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		new->cycle_time = cycle;
	}

	return 0;
}
static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	int i, j;

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i <= TC_BITMASK; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range being equal to the
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
			continue;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}

	return 0;
}
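
/* Compute when the schedule should take effect: at base_time if that is
 * still in the future, otherwise at the start of the next full cycle.
 * For example (values illustrative), base_time == 0, cycle_time == 1ms
 * and now == 2.5ms yields a start time of 3ms.
 */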
static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}
static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;
	ktime_t cycle;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}
static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}
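
/* Cache the link speed as picoseconds per byte: 'speed' is in Mb/s, so
 * (USEC_PER_SEC * 8) / speed == (8 * 10^6) / speed ps/byte, e.g. 8000
 * at 1Gb/s. The value is kept in an atomic64 so the hot path can read
 * it without taking a lock.
 */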
static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int picos_per_byte;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	picos_per_byte = (USEC_PER_SEC * 8) / speed;

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}
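
/* React to link changes on any device that has a taprio instance
 * attached: a speed change invalidates the cached picos_per_byte, so
 * recompute it.
 */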
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&taprio_list_lock);
	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&taprio_list_lock);

	if (found)
		taprio_set_picos_per_byte(dev, q);

	return NOTIFY_DONE;
}
static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;
	u32 interval = 0;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}
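
/* The offload descriptor handed to drivers is reference counted, since
 * a driver may need to keep it around after ndo_setup_tc() returns:
 * taprio_offload_get() takes a reference and taprio_offload_free()
 * drops it, freeing the descriptor when the last user is gone.
 */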
static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	size_t size = sizeof(struct tc_taprio_sched_entry) * num_entries +
		      sizeof(struct __tc_taprio_qopt_offload);
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(size, GFP_KERNEL);
	if (!__offload)
		return NULL;

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);
/* The function will only serve to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so when calling dump() the
 * user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	spin_lock(&q->current_entry_lock);

	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	switch_schedules(q, &admin, &oper);

	spin_unlock(&q->current_entry_lock);
}
static void taprio_sched_to_offload(struct taprio_sched *q,
				    struct sched_gate_list *sched,
				    const struct tc_mqprio_qopt *mqprio,
				    struct tc_taprio_qopt_offload *offload)
{
	struct sched_entry *entry;
	int i = 0;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		e->gate_mask = entry->gate_mask;
		i++;
	}

	offload->num_entries = i;
}
static int taprio_enable_offload(struct net_device *dev,
				 struct tc_mqprio_qopt *mqprio,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err = 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->enable = 1;
	taprio_sched_to_offload(q, sched, mqprio, offload);

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to setup taprio offload");
		goto done;
	}

done:
	taprio_offload_free(offload);

	return err;
}
static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
		return 0;

	if (!ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
		goto out;
	}

out:
	taprio_offload_free(offload);

	return err;
}
/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and is hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err = -EINVAL;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
			.phc_index = -1,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
			goto out;
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
			err = -ENOTSUPP;
			goto out;
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto out;
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			q->tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			q->tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			q->tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			q->tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			err = -EINVAL;
			goto out;
		}

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		goto out;
	}

	/* Everything went ok, return success. */
	err = 0;

out:
	return err;
}
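
/* Returns 0 if 'mqprio' describes exactly the traffic class to queue
 * mapping that is already programmed into 'dev', -1 otherwise.
 */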
static int taprio_mqprio_cmp(const struct net_device *dev,
			     const struct tc_mqprio_qopt *mqprio)
{
	int i;

	if (!mqprio || mqprio->num_tc != dev->num_tc)
		return -1;

	for (i = 0; i < mqprio->num_tc; i++)
		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
		    dev->tc_to_txq[i].offset != mqprio->offset[i])
			return -1;

	for (i = 0; i <= TC_BITMASK; i++)
		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
			return -1;

	return 0;
}
/* The semantics of the 'flags' argument in relation to 'change()'
 * requests are interpreted following two rules (which are applied in
 * this order): (1) an omitted 'flags' argument is interpreted as
 * zero; (2) the 'flags' of a "running" taprio instance cannot be
 * changed.
 */
static int taprio_new_flags(const struct nlattr *attr, u32 old,
			    struct netlink_ext_ack *extack)
{
	u32 new = 0;

	if (attr)
		new = nla_get_u32(attr);

	if (old != TAPRIO_FLAGS_INVALID && old != new) {
		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
		return -EOPNOTSUPP;
	}

	if (!taprio_flags_valid(new)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
		return -EINVAL;
	}

	return new;
}
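
/* taprio_change() handles both the initial configuration (via
 * taprio_init()) and later 'tc qdisc change' requests: it parses and
 * validates the netlink attributes, builds the new admin schedule and
 * then installs it under the qdisc lock.
 */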
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct sched_gate_list *oper, *admin, *new_admin;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	unsigned long flags;
	ktime_t start;
	int i, err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
					  taprio_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
			       q->flags, extack);
	if (err < 0)
		return err;

	q->flags = err;

	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
	if (err < 0)
		return err;

	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
	if (!new_admin) {
		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&new_admin->entries);

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);
	rcu_read_unlock();

	/* no changes - no new mqprio settings */
	if (!taprio_mqprio_cmp(dev, mqprio))
		mqprio = NULL;

	if (mqprio && (oper || admin)) {
		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
		err = -ENOTSUPP;
		goto free_sched;
	}

	err = parse_taprio_schedule(tb, new_admin, extack);
	if (err < 0)
		goto free_sched;

	if (new_admin->num_entries == 0) {
		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
		err = -EINVAL;
		goto free_sched;
	}

	err = taprio_parse_clockid(sch, tb, extack);
	if (err < 0)
		goto free_sched;

	taprio_set_picos_per_byte(dev, q);

	if (mqprio) {
		netdev_set_num_tc(dev, mqprio->num_tc);
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i <= TC_BITMASK; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
	else
		err = taprio_disable_offload(dev, q, extack);
	if (err)
		goto free_sched;

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
			err = -EINVAL;
			goto unlock;
		}

		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
	}

	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		q->dequeue = taprio_dequeue_offload;
		q->peek = taprio_peek_offload;
	} else {
		/* Be sure to always keep the function pointers
		 * in a consistent state.
		 */
		q->dequeue = taprio_dequeue_soft;
		q->peek = taprio_peek_soft;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
		goto unlock;
	}

	setup_txtime(q, new_admin, start);

	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		if (!oper) {
			rcu_assign_pointer(q->oper_sched, new_admin);
			err = 0;
			new_admin = NULL;
			goto unlock;
		}

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);
	} else {
		setup_first_close_time(q, new_admin, start);

		/* Protects against advance_sched() */
		spin_lock_irqsave(&q->current_entry_lock, flags);

		taprio_start_sched(sch, start, new_admin);

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);

		spin_unlock_irqrestore(&q->current_entry_lock, flags);

		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
			taprio_offload_config_changed(q);
	}

	new_admin = NULL;
	err = 0;

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	if (new_admin)
		call_rcu(&new_admin->rcu, taprio_free_sched_cb);

	return err;
}
static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int i;

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	hrtimer_cancel(&q->advance_timer);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_reset_tc(dev);

	if (q->oper_sched)
		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

	if (q->admin_sched)
		call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->dequeue = taprio_dequeue_soft;
	q->peek = taprio_peek_soft;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;
	q->flags = TAPRIO_FLAGS_INVALID;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdisc, attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}
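
/* Class identifiers are 1-based, so class 'cl' maps to TX queue cl - 1. */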
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}
static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}
static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}
static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;
	unsigned int i;

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	rcu_read_unlock();

	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	rcu_read_unlock();
	return -ENOSPC;
}
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}
static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;
	return ntx;
}
static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}
static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}
static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};
static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};
static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};
static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}
static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}
module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");