// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */

#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

#define TAPRIO_STAT_NOT_SET	(~0ULL)

#include "sch_mqprio_lib.h"
static LIST_HEAD(taprio_list);
static DEFINE_STATIC_KEY_FALSE(taprio_have_broken_mqprio);
static DEFINE_STATIC_KEY_FALSE(taprio_have_working_mqprio);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_SUPPORTED_FLAGS \
	(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
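/* Illustrative only (not part of the upstream file): the two supported
 * flag bits correspond to iproute2's "flags" argument. A hypothetical
 * full offload configuration could look like:
 *
 *	tc qdisc replace dev eth0 parent root handle 100 taprio \
 *		num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *		queues 1@0 1@1 2@2 \
 *		base-time 1528743495910289987 \
 *		sched-entry S 01 300000 \
 *		sched-entry S 02 300000 \
 *		sched-entry S 04 400000 \
 *		flags 0x2
 *
 * flags 0x1 requests txtime-assist mode instead; the two modes are
 * mutually exclusive, which taprio_change() enforces below.
 */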
struct sched_entry {
	/* Durations between this GCL entry and the GCL entry where the
	 * respective traffic class gate closes
	 */
	u64 gate_duration[TC_MAX_QUEUE];
	atomic_t budget[TC_MAX_QUEUE];
	/* The qdisc makes some effort so that no packet leaves
	 * after this time
	 */
	ktime_t gate_close_time[TC_MAX_QUEUE];
	struct list_head list;
	/* Used to calculate when to advance the schedule */
	ktime_t end_time;
	ktime_t next_txtime;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};
struct sched_gate_list {
	/* Longest non-zero contiguous gate durations per traffic class,
	 * or 0 if a traffic class gate never opens during the schedule.
	 */
	u64 max_open_gate_duration[TC_MAX_QUEUE];
	u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
	u32 max_sdu[TC_MAX_QUEUE]; /* for dump */
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_end_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};
struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	bool offloaded;
	bool detected_mqprio;
	bool broken_mqprio;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */
	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	int cur_txq[TC_MAX_QUEUE];
	u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */
	u32 fp[TC_QOPT_MAX_QUEUE]; /* only for dump and offloading */
	u32 txtime_delay;
};
struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};
static void taprio_calculate_gate_durations(struct taprio_sched *q,
					    struct sched_gate_list *sched)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *entry, *cur;
	int tc;

	list_for_each_entry(entry, &sched->entries, list) {
		u32 gates_still_open = entry->gate_mask;

		/* For each traffic class, calculate each open gate duration,
		 * starting at this schedule entry and ending at the schedule
		 * entry containing a gate close event for that TC.
		 */
		cur = entry;

		do {
			if (!gates_still_open)
				break;

			for (tc = 0; tc < num_tc; tc++) {
				if (!(gates_still_open & BIT(tc)))
					continue;

				if (cur->gate_mask & BIT(tc))
					entry->gate_duration[tc] += cur->interval;
				else
					gates_still_open &= ~BIT(tc);
			}

			cur = list_next_entry_circular(cur, &sched->entries, list);
		} while (cur != entry);

		/* Keep track of the maximum gate duration for each traffic
		 * class, taking care to not confuse a traffic class which is
		 * temporarily closed with one that is always closed.
		 */
		for (tc = 0; tc < num_tc; tc++)
			if (entry->gate_duration[tc] &&
			    sched->max_open_gate_duration[tc] < entry->gate_duration[tc])
				sched->max_open_gate_duration[tc] = entry->gate_duration[tc];
	}
}
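/* Worked example (illustrative, not from the upstream file): for a
 * two-entry schedule with cycle_time 500000 ns,
 *
 *	entry 0: gate_mask 0b01, interval 300000
 *	entry 1: gate_mask 0b11, interval 200000
 *
 * TC0's gate never closes, so the circular walk accumulates the full
 * cycle for both entries: gate_duration[0] = 500000 in each. TC1 is
 * only open during entry 1, so entry 1 gets gate_duration[1] = 200000
 * while entry 0 keeps gate_duration[1] = 0, and
 * max_open_gate_duration[1] = 200000.
 */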
static bool taprio_entry_allows_tx(ktime_t skb_end_time,
				   struct sched_entry *entry, int tc)
{
	return ktime_before(skb_end_time, entry->gate_close_time[tc]);
}

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
	/* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
	enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

	switch (tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, tk_offset);
	}
}

static ktime_t taprio_get_time(const struct taprio_sched *q)
{
	return taprio_mono_to_any(q, ktime_get());
}
static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}
/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}
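/* Worked example (illustrative): base_time = 1.0 s, cycle_time = 200 ms,
 * time = 2.5 s. time_since_sched_start = 1.5 s, and the remainder of
 * 1.5 s / 200 ms is 100 ms, i.e. we are 100 ms into the current cycle.
 */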
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}
static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}

static int duration_to_length(struct taprio_sched *q, u64 duration)
{
	return div_u64(duration * PSEC_PER_NSEC, atomic64_read(&q->picos_per_byte));
}
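/* Worked example (illustrative): at 1 Gbps taprio_set_picos_per_byte()
 * stores (USEC_PER_SEC * 8) / 1000 = 8000 ps per byte, so
 * length_to_duration() maps a 1500 byte frame to 1500 * 8000 / 1000 =
 * 12000 ns, and duration_to_length() inverts it: 12000 ns covers
 * 12000 * 1000 / 8000 = 1500 bytes.
 */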
/* Sets sched->max_sdu[] and sched->max_frm_len[] to the minimum between the
 * q->max_sdu[] requested by the user and the max_sdu dynamically determined by
 * the maximum open gate durations at the given link speed.
 */
static void taprio_update_queue_max_sdu(struct taprio_sched *q,
					struct sched_gate_list *sched,
					struct qdisc_size_table *stab)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	u32 max_sdu_from_user;
	u32 max_sdu_dynamic;
	u32 max_sdu;
	int tc;

	for (tc = 0; tc < num_tc; tc++) {
		max_sdu_from_user = q->max_sdu[tc] ?: U32_MAX;

		/* TC gate never closes => keep the queueMaxSDU
		 * selected by the user
		 */
		if (sched->max_open_gate_duration[tc] == sched->cycle_time) {
			max_sdu_dynamic = U32_MAX;
		} else {
			u32 max_frm_len;

			max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]);
			/* Compensate for L1 overhead from size table,
			 * but don't let the frame size go negative
			 */
			if (stab) {
				max_frm_len -= stab->szopts.overhead;
				max_frm_len = max_t(int, max_frm_len,
						    dev->hard_header_len + 1);
			}
			max_sdu_dynamic = max_frm_len - dev->hard_header_len;
			if (max_sdu_dynamic > dev->max_mtu)
				max_sdu_dynamic = U32_MAX;
		}

		max_sdu = min(max_sdu_dynamic, max_sdu_from_user);

		if (max_sdu != U32_MAX) {
			sched->max_frm_len[tc] = max_sdu + dev->hard_header_len;
			sched->max_sdu[tc] = max_sdu;
		} else {
			sched->max_frm_len[tc] = U32_MAX; /* never oversized */
			sched->max_sdu[tc] = 0;
		}
	}
}
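/* Worked example (illustrative): if the longest open gate for a TC is
 * 12000 ns at 1 Gbps (8000 ps/byte) and there is no size table,
 * max_frm_len = duration_to_length() = 1500 bytes, so max_sdu_dynamic =
 * 1500 - 14 (Ethernet hard_header_len) = 1486. A user-supplied
 * queueMaxSDU of 1000 would win the min(); with no user limit,
 * max_sdu[tc] = 1486 and max_frm_len[tc] = 1500.
 */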
/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}
static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}
/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}
/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and the packet can
 *       be transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of the packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of the current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}
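/* Worked example (illustrative): with txtime_delay = 200000 ns, a
 * packet enqueued at t = 10.0 ms has minimum_time = 10.2 ms. If the
 * next open interval for its TC is [10.5 ms, 10.8 ms) and
 * entry->next_txtime lies in the past, the loop above yields txtime =
 * max(next_txtime, minimum_time, interval_start) = 10.5 ms (case 1b of
 * the comment above), and next_txtime then advances to the packet's
 * transmit end time so later packets line up behind it.
 */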
/* Devices with full offload are expected to honor this in hardware */
static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch,
					     struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *sched;
	int prio = skb->priority;
	bool exceeds = false;
	u8 tc;

	tc = netdev_get_prio_tc_map(dev, prio);

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	if (sched && skb->len > sched->max_frm_len[tc])
		exceeds = true;
	rcu_read_unlock();

	return exceeds;
}
static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
			      struct Qdisc *child, struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);

	/* sk_flags are only safe to use on full sockets. */
	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}
static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
				    struct Qdisc *child,
				    struct sk_buff **to_free)
{
	unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
	netdev_features_t features = netif_skb_features(skb);
	struct sk_buff *segs, *nskb;
	int ret;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	skb_list_walk_safe(segs, segs, nskb) {
		skb_mark_not_on_list(segs);
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		slen += segs->len;

		/* FIXME: we should be segmenting to a smaller size
		 * rather than dropping these
		 */
		if (taprio_skb_exceeds_queue_max_sdu(sch, segs))
			ret = qdisc_drop(segs, sch, to_free);
		else
			ret = taprio_enqueue_one(segs, sch, child, to_free);

		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			numsegs++;
		}
	}

	if (numsegs > 1)
		qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
	consume_skb(skb);

	return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) {
		/* Large packets might not be transmitted when the transmission
		 * duration exceeds any configured interval. Therefore, segment
		 * the skb into smaller chunks. Drivers with full offload are
		 * expected to handle this in hardware.
		 */
		if (skb_is_gso(skb))
			return taprio_enqueue_segmented(skb, sch, child,
							to_free);

		return qdisc_drop(skb, sch, to_free);
	}

	return taprio_enqueue_one(skb, sch, child, to_free);
}
static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented");
	return NULL;
}
static void taprio_set_budgets(struct taprio_sched *q,
			       struct sched_gate_list *sched,
			       struct sched_entry *entry)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	int tc, budget;

	for (tc = 0; tc < num_tc; tc++) {
		/* Traffic classes which never close have infinite budget */
		if (entry->gate_duration[tc] == sched->cycle_time)
			budget = INT_MAX;
		else
			budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC,
					   atomic64_read(&q->picos_per_byte));

		atomic_set(&entry->budget[tc], budget);
	}
}
/* When an skb is sent, it consumes from the budget of all traffic classes */
static int taprio_update_budgets(struct sched_entry *entry, size_t len,
				 int tc_consumed, int num_tc)
{
	int tc, budget, new_budget = 0;

	for (tc = 0; tc < num_tc; tc++) {
		budget = atomic_read(&entry->budget[tc]);
		/* Don't consume from infinite budget */
		if (budget == INT_MAX) {
			if (tc == tc_consumed)
				new_budget = budget;
			continue;
		}

		if (tc == tc_consumed)
			new_budget = atomic_sub_return(len, &entry->budget[tc]);
		else
			atomic_sub(len, &entry->budget[tc]);
	}

	return new_budget;
}
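/* Worked example (illustrative): a 100 us gate at 1 Gbps (8000 ps per
 * byte) gives taprio_set_budgets() a budget of 100000 * 1000 / 8000 =
 * 12500 bytes. Every dequeued skb is subtracted from all finite TC
 * budgets of the entry, and dequeueing for a TC stops once its budget
 * would go negative.
 */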
static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq,
					       struct sched_entry *entry,
					       u32 gate_mask)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *child = q->qdiscs[txq];
	int num_tc = netdev_get_num_tc(dev);
	struct sk_buff *skb;
	ktime_t guard;
	int prio;
	int len;
	u8 tc;

	if (unlikely(!child))
		return NULL;

	if (TXTIME_ASSIST_IS_ENABLED(q->flags))
		goto skip_peek_checks;

	skb = child->ops->peek(child);
	if (!skb)
		return NULL;

	prio = skb->priority;
	tc = netdev_get_prio_tc_map(dev, prio);

	if (!(gate_mask & BIT(tc)))
		return NULL;

	len = qdisc_pkt_len(skb);
	guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len));

	/* In the case that there's no gate entry, there's no
	 * guard band ...
	 */
	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
	    !taprio_entry_allows_tx(guard, entry, tc))
		return NULL;

	/* ... and no budget. */
	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
	    taprio_update_budgets(entry, len, tc, num_tc) < 0)
		return NULL;

skip_peek_checks:
	skb = child->ops->dequeue(child);
	if (unlikely(!skb))
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}
static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq)
{
	int offset = dev->tc_to_txq[tc].offset;
	int count = dev->tc_to_txq[tc].count;

	(*txq)++;
	if (*txq == offset + count)
		*txq = offset;
}
/* Prioritize higher traffic classes, and select among TXQs belonging to the
 * same TC using round robin
 */
static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
						  struct sched_entry *entry,
						  u32 gate_mask)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int num_tc = netdev_get_num_tc(dev);
	struct sk_buff *skb;
	int tc;

	for (tc = num_tc - 1; tc >= 0; tc--) {
		int first_txq = q->cur_txq[tc];

		if (!(gate_mask & BIT(tc)))
			continue;

		do {
			skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc],
						      entry, gate_mask);

			taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);

			if (q->cur_txq[tc] >= dev->num_tx_queues)
				q->cur_txq[tc] = first_txq;

			if (skb)
				return skb;
		} while (q->cur_txq[tc] != first_txq);
	}

	return NULL;
}
/* Broken way of prioritizing smaller TXQ indices and ignoring the traffic
 * class other than to determine whether the gate is open or not
 */
static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch,
						   struct sched_entry *entry,
						   u32 gate_mask)
{
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		skb = taprio_dequeue_from_txq(sch, i, entry, gate_mask);
		if (skb)
			return skb;
	}

	return NULL;
}
/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	if (!gate_mask)
		goto done;

	if (static_branch_unlikely(&taprio_have_broken_mqprio) &&
	    !static_branch_likely(&taprio_have_working_mqprio)) {
		/* Single NIC kind which is broken */
		skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
	} else if (static_branch_likely(&taprio_have_working_mqprio) &&
		   !static_branch_unlikely(&taprio_have_broken_mqprio)) {
		/* Single NIC kind which prioritizes properly */
		skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
	} else {
		/* Mixed NIC kinds present in system, need dynamic testing */
		if (q->broken_mqprio)
			skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
		else
			skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
	}

done:
	rcu_read_unlock();

	return skb;
}
static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t end_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the end_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, end_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the end_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(end_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct net_device *dev = qdisc_dev(q->root);
	struct sched_gate_list *oper, *admin;
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t end_time;
	int tc;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules is pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->end_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		end_time = next->end_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time,
						    oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	end_time = ktime_add_ns(entry->end_time, next->interval);
	end_time = min_t(ktime_t, end_time, oper->cycle_end_time);

	for (tc = 0; tc < num_tc; tc++) {
		if (next->gate_duration[tc] == oper->cycle_time)
			next->gate_close_time[tc] = KTIME_MAX;
		else
			next->gate_close_time[tc] = ktime_add_ns(entry->end_time,
								 next->gate_duration[tc]);
	}

	if (should_change_schedules(admin, oper, end_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		end_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->end_time = end_time;
	taprio_set_budgets(q, oper, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, end_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}
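/* Worked example (illustrative): for entries of 300/300/400 us and
 * cycle_time = 1 ms starting at base B, setup_first_end_time() sets
 * end_time(entry 0) = B + 300 us and cycle_end_time = B + 1 ms. Each
 * advance_sched() run then adds the next interval: B + 600 us, then
 * B + 1000 us (clamped to cycle_end_time). At B + 1 ms,
 * should_restart_cycle() wraps back to entry 0 and pushes
 * cycle_end_time out to B + 2 ms.
 */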
static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_TC_ENTRY_INDEX]	= { .type = NLA_U32 },
	[TCA_TAPRIO_TC_ENTRY_MAX_SDU]	= { .type = NLA_U32 },
	[TCA_TAPRIO_TC_ENTRY_FP]	= NLA_POLICY_RANGE(NLA_U32,
							   TC_FP_EXPRESS,
							   TC_FP_PREEMPTIBLE),
};

static const struct netlink_range_validation_signed taprio_cycle_time_range = {
	.min = 0,
	.max = INT_MAX,
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP]		     = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]	     = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]	     = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]	     = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]		     = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]	     =
		NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS]			     =
		NLA_POLICY_MASK(NLA_U32, TAPRIO_SUPPORTED_FLAGS),
	[TCA_TAPRIO_ATTR_TXTIME_DELAY]		     = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TC_ENTRY]		     = { .type = NLA_NESTED },
};
static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
			    struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	int min_duration = length_to_duration(q, ETH_ZLEN);
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	/* The interval should allow at least the minimum ethernet
	 * frame to go out.
	 */
	if (interval < min_duration) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
			     struct sched_entry *entry, int index,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(q, tb, entry, extack);
}
static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(q, n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}
static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
				       new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);

		if (!cycle) {
			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
			return -EINVAL;
		}

		if (cycle < 0 || cycle > INT_MAX) {
			NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
			return -EINVAL;
		}

		new->cycle_time = cycle;
	}

	taprio_calculate_gate_durations(q, new);

	return 0;
}
static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* For some reason, in txtime-assist mode, we allow TXQ ranges for
	 * different TCs to overlap, and just validate the TXQ ranges.
	 */
	return mqprio_validate_qopt(dev, qopt, true, allow_overlapping_txqs,
				    extack);
}
static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}
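/* Worked example (illustrative): base = 1.0 s, now = 2.5 s, cycle =
 * 200 ms. n = (2.5 s - 1.0 s) / 200 ms = 7 complete cycles, so the
 * schedule is started at base + 8 * cycle = 2.6 s, the first cycle
 * boundary in the future.
 */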
static void setup_first_end_time(struct taprio_sched *q,
				 struct sched_gate_list *sched, ktime_t base)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *first;
	ktime_t cycle;
	int tc;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_end_time = ktime_add_ns(base, cycle);

	first->end_time = ktime_add_ns(base, first->interval);
	taprio_set_budgets(q, sched, first);

	for (tc = 0; tc < num_tc; tc++) {
		if (first->gate_duration[tc] == sched->cycle_time)
			first->gate_close_time[tc] = KTIME_MAX;
		else
			first->gate_close_time[tc] = ktime_add_ns(base, first->gate_duration[tc]);
	}

	rcu_assign_pointer(q->current_entry, NULL);
}
static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}
static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int picos_per_byte;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	picos_per_byte = (USEC_PER_SEC * 8) / speed;

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct sched_gate_list *oper, *admin;
	struct qdisc_size_table *stab;
	struct taprio_sched *q;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	list_for_each_entry(q, &taprio_list, taprio_list) {
		if (dev != qdisc_dev(q->root))
			continue;

		taprio_set_picos_per_byte(dev, q);

		stab = rtnl_dereference(q->root->stab);

		oper = rtnl_dereference(q->oper_sched);
		if (oper)
			taprio_update_queue_max_sdu(q, oper, stab);

		admin = rtnl_dereference(q->admin_sched);
		if (admin)
			taprio_update_queue_max_sdu(q, admin, stab);

		break;
	}

	return NOTIFY_DONE;
}
static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;
	u64 interval = 0;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}
static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
			    GFP_KERNEL);
	if (!__offload)
		return NULL;

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}

struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);
/* The function will only serve to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so when calling dump() the
 * user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	switch_schedules(q, &admin, &oper);
}
static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{
	u32 i, queue_mask = 0;

	for (i = 0; i < dev->num_tc; i++) {
		int count, offset;

		if (!(tc_mask & BIT(i)))
			continue;

		offset = dev->tc_to_txq[i].offset;
		count = dev->tc_to_txq[i].count;

		queue_mask |= GENMASK(offset + count - 1, offset);
	}

	return queue_mask;
}
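/* Worked example (illustrative): with "queues 1@0 1@1 2@2" (TC0 on
 * TXQ0, TC1 on TXQ1, TC2 on TXQ2-3), a gate mask of 0b110 (TC1 and TC2
 * open) becomes GENMASK(1, 1) | GENMASK(3, 2) = 0b1110.
 */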
static void taprio_sched_to_offload(struct net_device *dev,
				    struct sched_gate_list *sched,
				    struct tc_taprio_qopt_offload *offload,
				    const struct tc_taprio_caps *caps)
{
	struct sched_entry *entry;
	int i = 0;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		if (caps->gate_mask_per_txq)
			e->gate_mask = tc_map_to_queue_mask(dev,
							    entry->gate_mask);
		else
			e->gate_mask = entry->gate_mask;

		i++;
	}

	offload->num_entries = i;
}
static void taprio_detect_broken_mqprio(struct taprio_sched *q)
{
	struct net_device *dev = qdisc_dev(q->root);
	struct tc_taprio_caps caps;

	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
				 &caps, sizeof(caps));

	q->broken_mqprio = caps.broken_mqprio;
	if (q->broken_mqprio)
		static_branch_inc(&taprio_have_broken_mqprio);
	else
		static_branch_inc(&taprio_have_working_mqprio);

	q->detected_mqprio = true;
}

static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
{
	if (!q->detected_mqprio)
		return;

	if (q->broken_mqprio)
		static_branch_dec(&taprio_have_broken_mqprio);
	else
		static_branch_dec(&taprio_have_working_mqprio);
}
static int taprio_enable_offload(struct net_device *dev,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	struct tc_taprio_caps caps;
	int tc, err = 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
				 &caps, sizeof(caps));

	if (!caps.supports_queue_max_sdu) {
		for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
			if (q->max_sdu[tc]) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Device does not handle queueMaxSDU");
				return -EOPNOTSUPP;
			}
		}
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->cmd = TAPRIO_CMD_REPLACE;
	offload->extack = extack;
	mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt);
	offload->mqprio.extack = extack;
	taprio_sched_to_offload(dev, sched, offload, &caps);
	mqprio_fp_to_offload(q->fp, &offload->mqprio);

	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
		offload->max_sdu[tc] = q->max_sdu[tc];

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG_WEAK(extack,
				    "Device failed to setup taprio offload");
		goto done;
	}

	q->offloaded = true;

done:
	/* The offload structure may linger around via a reference taken by the
	 * device driver, so clear up the netlink extack pointer so that the
	 * driver isn't tempted to dereference data which stopped being valid
	 */
	offload->extack = NULL;
	offload->mqprio.extack = NULL;
	taprio_offload_free(offload);

	return err;
}
static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err;

	if (!q->offloaded)
		return 0;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->cmd = TAPRIO_CMD_DESTROY;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
		goto out;
	}

	q->offloaded = false;

out:
	taprio_offload_free(offload);

	return err;
}
/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and is hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err = -EINVAL;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
			.phc_index = -1,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
			goto out;
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
			err = -ENOTSUPP;
			goto out;
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
		enum tk_offsets tk_offset;

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto out;
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			err = -EINVAL;
			goto out;
		}
		/* This pairs with READ_ONCE() in taprio_mono_to_any */
		WRITE_ONCE(q->tk_offset, tk_offset);

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		goto out;
	}

	/* Everything went ok, return success. */
	err = 0;

out:
	return err;
}
static int taprio_parse_tc_entry(struct Qdisc *sch,
				 struct nlattr *opt,
				 u32 max_sdu[TC_QOPT_MAX_QUEUE],
				 u32 fp[TC_QOPT_MAX_QUEUE],
				 unsigned long *seen_tcs,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
	struct net_device *dev = qdisc_dev(sch);
	int err, tc;
	u32 val;

	err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
			       taprio_tc_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
		NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
		return -EINVAL;
	}

	tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
	if (tc >= TC_QOPT_MAX_QUEUE) {
		NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
		return -ERANGE;
	}

	if (*seen_tcs & BIT(tc)) {
		NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
		return -EINVAL;
	}

	*seen_tcs |= BIT(tc);

	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]) {
		val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
		if (val > dev->max_mtu) {
			NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
			return -ERANGE;
		}

		max_sdu[tc] = val;
	}

	if (tb[TCA_TAPRIO_TC_ENTRY_FP])
		fp[tc] = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_FP]);

	return 0;
}
static int taprio_parse_tc_entries(struct Qdisc *sch,
				   struct nlattr *opt,
				   struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	u32 max_sdu[TC_QOPT_MAX_QUEUE];
	bool have_preemption = false;
	unsigned long seen_tcs = 0;
	u32 fp[TC_QOPT_MAX_QUEUE];
	struct nlattr *n;
	int tc, rem;
	int err = 0;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
		max_sdu[tc] = q->max_sdu[tc];
		fp[tc] = q->fp[tc];
	}

	nla_for_each_nested(n, opt, rem) {
		if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
			continue;

		err = taprio_parse_tc_entry(sch, n, max_sdu, fp, &seen_tcs,
					    extack);
		if (err)
			return err;
	}

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
		q->max_sdu[tc] = max_sdu[tc];
		q->fp[tc] = fp[tc];

		if (fp[tc] != TC_FP_EXPRESS)
			have_preemption = true;
	}

	if (have_preemption) {
		if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG(extack,
				       "Preemption only supported with full offload");
			return -EOPNOTSUPP;
		}

		if (!ethtool_dev_mm_supported(dev)) {
			NL_SET_ERR_MSG(extack,
				       "Device does not support preemption");
			return -EOPNOTSUPP;
		}
	}

	return err;
}
static int taprio_mqprio_cmp(const struct net_device *dev,
			     const struct tc_mqprio_qopt *mqprio)
{
	int i;

	if (!mqprio || mqprio->num_tc != dev->num_tc)
		return -1;

	for (i = 0; i < mqprio->num_tc; i++)
		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
		    dev->tc_to_txq[i].offset != mqprio->offset[i])
			return -1;

	for (i = 0; i <= TC_BITMASK; i++)
		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
			return -1;

	return 0;
}
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct qdisc_size_table *stab = rtnl_dereference(sch->stab);
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct sched_gate_list *oper, *admin, *new_admin;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	unsigned long flags;
	u32 taprio_flags;
	ktime_t start;
	int i, err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
					  taprio_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	/* The semantics of the 'flags' argument in relation to 'change()'
	 * requests are interpreted following two rules (which are applied in
	 * this order): (1) an omitted 'flags' argument is interpreted as
	 * zero; (2) the 'flags' of a "running" taprio instance cannot be
	 * changed.
	 */
	taprio_flags = tb[TCA_TAPRIO_ATTR_FLAGS] ? nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]) : 0;

	/* txtime-assist and full offload are mutually exclusive */
	if ((taprio_flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (taprio_flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) {
		NL_SET_ERR_MSG_ATTR(extack, tb[TCA_TAPRIO_ATTR_FLAGS],
				    "TXTIME_ASSIST and FULL_OFFLOAD are mutually exclusive");
		return -EINVAL;
	}

	if (q->flags != TAPRIO_FLAGS_INVALID && q->flags != taprio_flags) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing 'flags' of a running schedule is not supported");
		return -EOPNOTSUPP;
	}
	q->flags = taprio_flags;

	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
	if (err < 0)
		return err;

	err = taprio_parse_tc_entries(sch, opt, extack);
	if (err)
		return err;

	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
	if (!new_admin) {
		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&new_admin->entries);

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	/* no changes - no new mqprio settings */
	if (!taprio_mqprio_cmp(dev, mqprio))
		mqprio = NULL;

	if (mqprio && (oper || admin)) {
		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
		err = -ENOTSUPP;
		goto free_sched;
	}

	if (mqprio) {
		err = netdev_set_num_tc(dev, mqprio->num_tc);
		if (err)
			goto free_sched;
		for (i = 0; i < mqprio->num_tc; i++) {
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);
			q->cur_txq[i] = mqprio->offset[i];
		}

		/* Always use supplied priority mappings */
		for (i = 0; i <= TC_BITMASK; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	err = parse_taprio_schedule(q, tb, new_admin, extack);
	if (err < 0)
		goto free_sched;

	if (new_admin->num_entries == 0) {
		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
		err = -EINVAL;
		goto free_sched;
	}

	err = taprio_parse_clockid(sch, tb, extack);
	if (err < 0)
		goto free_sched;

	taprio_set_picos_per_byte(dev, q);
	taprio_update_queue_max_sdu(q, new_admin, stab);

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		err = taprio_enable_offload(dev, q, new_admin, extack);
	else
		err = taprio_disable_offload(dev, q, extack);
	if (err)
		goto free_sched;

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
			err = -EINVAL;
			goto unlock;
		}

		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
	}

	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
		goto unlock;
	}

	setup_txtime(q, new_admin, start);

	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		if (!oper) {
			rcu_assign_pointer(q->oper_sched, new_admin);
			err = 0;
			new_admin = NULL;
			goto unlock;
		}

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);
	} else {
		setup_first_end_time(q, new_admin, start);

		/* Protects against advance_sched() */
		spin_lock_irqsave(&q->current_entry_lock, flags);

		taprio_start_sched(sch, start, new_admin);

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);

		spin_unlock_irqrestore(&q->current_entry_lock, flags);

		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
			taprio_offload_config_changed(q);
	}

	new_admin = NULL;
	err = 0;

	if (!stab)
		NL_SET_ERR_MSG_MOD(extack,
				   "Size table not specified, frame length estimations may be inaccurate");

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	if (new_admin)
		call_rcu(&new_admin->rcu, taprio_free_sched_cb);

	return err;
}
static void taprio_reset(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	hrtimer_cancel(&q->advance_timer);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			if (q->qdiscs[i])
				qdisc_reset(q->qdiscs[i]);
	}
}
static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	unsigned int i;

	list_del(&q->taprio_list);

	/* Note that taprio_reset() might not be called if an error
	 * happens in qdisc_create(), after taprio_init() has been called.
	 */
	hrtimer_cancel(&q->advance_timer);
	qdisc_synchronize(sch);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_reset_tc(dev);

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	if (oper)
		call_rcu(&oper->rcu, taprio_free_sched_cb);

	if (admin)
		call_rcu(&admin->rcu, taprio_free_sched_cb);

	taprio_cleanup_broken_mqprio(q);
}
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i, tc;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;
	q->flags = TAPRIO_FLAGS_INVALID;

	list_add(&q->taprio_list, &taprio_list);

	if (sch->parent != TC_H_ROOT) {
		NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc");
		return -EOPNOTSUPP;
	}

	if (!netif_is_multiqueue(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required");
		return -EOPNOTSUPP;
	}

	q->qdiscs = kcalloc(dev->num_tx_queues, sizeof(q->qdiscs[0]),
			    GFP_KERNEL);
	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
		q->fp[tc] = TC_FP_EXPRESS;

	taprio_detect_broken_mqprio(q);

	return taprio_change(sch, opt, extack);
}
static void taprio_attach(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
		struct Qdisc *old, *dev_queue_qdisc;

		if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
			struct Qdisc *qdisc = q->qdiscs[ntx];

			/* In offload mode, the root taprio qdisc is bypassed
			 * and the netdev TX queues see the children directly
			 */
			qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
			dev_queue_qdisc = qdisc;
		} else {
			/* In software mode, attach the root taprio qdisc
			 * to all netdev TX queues, so that dev_qdisc_enqueue()
			 * goes through taprio_enqueue().
			 */
			dev_queue_qdisc = sch;
		}
		old = dev_graft_qdisc(dev_queue, dev_queue_qdisc);
		/* The qdisc's refcount needs to be elevated once
		 * for each netdev TX queue it is grafted onto
		 */
		qdisc_refcount_inc(dev_queue_qdisc);
		if (old)
			qdisc_put(old);
	}
}
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}
static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	/* In offload mode, the child Qdisc is directly attached to the netdev
	 * TX queue, and thus, we need to keep its refcount elevated in order
	 * to counteract qdisc_graft()'s call to qdisc_put() once per TX queue.
	 * However, save the reference to the new qdisc in the private array in
	 * both software and offload cases, to have an up-to-date reference to
	 * our children.
	 */
	*old = q->qdiscs[cl - 1];
	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		WARN_ON_ONCE(dev_graft_qdisc(dev_queue, new) != *old);
		if (new)
			qdisc_refcount_inc(new);
		if (*old)
			qdisc_put(*old);
	}

	q->qdiscs[cl - 1] = new;
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}
static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}
static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}
static int taprio_dump_tc_entries(struct sk_buff *skb,
				  struct taprio_sched *q,
				  struct sched_gate_list *sched)
{
	struct nlattr *n;
	int tc;

	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
		n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY);
		if (!n)
			return -EMSGSIZE;

		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
				sched->max_sdu[tc]))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, q->fp[tc]))
			goto nla_put_failure;

		nla_nest_end(skb, n);
	}

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, n);
	return -EMSGSIZE;
}
static int taprio_put_stat(struct sk_buff *skb, u64 val, u16 attrtype)
{
	if (val == TAPRIO_STAT_NOT_SET)
		return 0;
	if (nla_put_u64_64bit(skb, attrtype, val, TCA_TAPRIO_OFFLOAD_STATS_PAD))
		return -EMSGSIZE;
	return 0;
}
static int taprio_dump_xstats(struct Qdisc *sch, struct gnet_dump *d,
			      struct tc_taprio_qopt_offload *offload,
			      struct tc_taprio_qopt_stats *stats)
{
	struct net_device *dev = qdisc_dev(sch);
	const struct net_device_ops *ops;
	struct sk_buff *skb = d->skb;
	struct nlattr *xstats;
	int err;

	ops = qdisc_dev(sch)->netdev_ops;

	/* FIXME I could use qdisc_offload_dump_helper(), but that messes
	 * with sch->flags depending on whether the device reports taprio
	 * stats, and I'm not sure whether that's a good idea, considering
	 * that stats are optional to the offload itself
	 */
	if (!ops->ndo_setup_tc)
		return 0;

	memset(stats, 0xff, sizeof(*stats));

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err == -EOPNOTSUPP)
		return 0;
	if (err)
		return err;

	xstats = nla_nest_start(skb, TCA_STATS_APP);
	if (!xstats)
		goto err;

	if (taprio_put_stat(skb, stats->window_drops,
			    TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS) ||
	    taprio_put_stat(skb, stats->tx_overruns,
			    TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS))
		goto err_cancel;

	nla_nest_end(skb, xstats);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xstats);
err:
	return -EMSGSIZE;
}
static int taprio_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct tc_taprio_qopt_offload offload = {
		.cmd = TAPRIO_CMD_STATS,
	};

	return taprio_dump_xstats(sch, d, &offload, &offload.stats);
}
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	mqprio_qopt_reconstruct(dev, &opt);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	if (oper && taprio_dump_tc_entries(skb, q, oper))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	return -ENOSPC;
}
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return q->qdiscs[ntx];
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;
	return ntx;
}
static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct Qdisc *child = taprio_leaf(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = child->handle;

	return 0;
}
static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct Qdisc *child = taprio_leaf(sch, cl);
	struct tc_taprio_qopt_offload offload = {
		.cmd = TAPRIO_CMD_QUEUE_STATS,
		.queue_stats = {
			.queue = cl - 1,
		},
	};

	if (gnet_stats_copy_basic(d, NULL, &child->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, child) < 0)
		return -1;

	return taprio_dump_xstats(sch, d, &offload, &offload.queue_stats.stats);
}
static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
			break;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.reset		= taprio_reset,
	.attach		= taprio_attach,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.dump_stats	= taprio_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("taprio");
static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Time Aware Priority qdisc");