// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c        Time Aware Priority Scheduler
 *
 * Authors:     Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

#define TAPRIO_STAT_NOT_SET     (~0ULL)

#include "sch_mqprio_lib.h"

static LIST_HEAD(taprio_list);
static struct static_key_false taprio_have_broken_mqprio;
static struct static_key_false taprio_have_working_mqprio;

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_SUPPORTED_FLAGS \
        (TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

struct sched_entry {
        /* Durations between this GCL entry and the GCL entry where the
         * respective traffic class gate closes
         */
        u64 gate_duration[TC_MAX_QUEUE];
        atomic_t budget[TC_MAX_QUEUE];
        /* The qdisc makes some effort so that no packet leaves
         * after this time
         */
        ktime_t gate_close_time[TC_MAX_QUEUE];
        struct list_head list;
        /* Used to calculate when to advance the schedule */
        ktime_t end_time;
        ktime_t next_txtime;
        int index;
        u32 gate_mask;
        u32 interval;
        u8 command;
};

struct sched_gate_list {
        /* Longest non-zero contiguous gate durations per traffic class,
         * or 0 if a traffic class gate never opens during the schedule.
         */
        u64 max_open_gate_duration[TC_MAX_QUEUE];
        u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
        u32 max_sdu[TC_MAX_QUEUE]; /* for dump */
        struct rcu_head rcu;
        struct list_head entries;
        size_t num_entries;
        ktime_t cycle_end_time;
        s64 cycle_time;
        s64 cycle_time_extension;
        s64 base_time;
};

struct taprio_sched {
        struct Qdisc **qdiscs;
        struct Qdisc *root;
        u32 flags;
        enum tk_offsets tk_offset;
        int clockid;
        bool offloaded;
        bool detected_mqprio;
        bool broken_mqprio;
        atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
                                    * speeds it's sub-nanoseconds per byte
                                    */

        /* Protects the update side of the RCU protected current_entry */
        spinlock_t current_entry_lock;
        struct sched_entry __rcu *current_entry;
        struct sched_gate_list __rcu *oper_sched;
        struct sched_gate_list __rcu *admin_sched;
        struct hrtimer advance_timer;
        struct list_head taprio_list;
        int cur_txq[TC_MAX_QUEUE];
        u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */
        u32 fp[TC_QOPT_MAX_QUEUE]; /* only for dump and offloading */
        u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
        refcount_t users;
        struct tc_taprio_qopt_offload offload;
};

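/* For each schedule entry and traffic class, compute how long the gate
 * stays continuously open starting at that entry, by walking the gate
 * control list circularly until a close event for that class is found.
 * Also record the longest open duration per class in the schedule.
 */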
static void taprio_calculate_gate_durations(struct taprio_sched *q,
                                            struct sched_gate_list *sched)
{
        struct net_device *dev = qdisc_dev(q->root);
        int num_tc = netdev_get_num_tc(dev);
        struct sched_entry *entry, *cur;
        int tc;

        list_for_each_entry(entry, &sched->entries, list) {
                u32 gates_still_open = entry->gate_mask;

                /* For each traffic class, calculate each open gate duration,
                 * starting at this schedule entry and ending at the schedule
                 * entry containing a gate close event for that TC.
                 */
                cur = entry;

                do {
                        if (!gates_still_open)
                                break;

                        for (tc = 0; tc < num_tc; tc++) {
                                if (!(gates_still_open & BIT(tc)))
                                        continue;

                                if (cur->gate_mask & BIT(tc))
                                        entry->gate_duration[tc] += cur->interval;
                                else
                                        gates_still_open &= ~BIT(tc);
                        }

                        cur = list_next_entry_circular(cur, &sched->entries, list);
                } while (cur != entry);

                /* Keep track of the maximum gate duration for each traffic
                 * class, taking care to not confuse a traffic class which is
                 * temporarily closed with one that is always closed.
                 */
                for (tc = 0; tc < num_tc; tc++)
                        if (entry->gate_duration[tc] &&
                            sched->max_open_gate_duration[tc] < entry->gate_duration[tc])
                                sched->max_open_gate_duration[tc] = entry->gate_duration[tc];
        }
}

static bool taprio_entry_allows_tx(ktime_t skb_end_time,
                                   struct sched_entry *entry, int tc)
{
        return ktime_before(skb_end_time, entry->gate_close_time[tc]);
}

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
        if (!sched)
                return KTIME_MAX;

        return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
        /* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
        enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

        switch (tk_offset) {
        case TK_OFFS_MAX:
                return mono;
        default:
                return ktime_mono_to_any(mono, tk_offset);
        }
}

static ktime_t taprio_get_time(const struct taprio_sched *q)
{
        return taprio_mono_to_any(q, ktime_get());
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
        struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
        struct sched_entry *entry, *n;

        list_for_each_entry_safe(entry, n, &sched->entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        kfree(sched);
}

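/* Promote the admin schedule to operational. The old operational
 * schedule, if any, is freed after an RCU grace period.
 */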
static void switch_schedules(struct taprio_sched *q,
                             struct sched_gate_list **admin,
                             struct sched_gate_list **oper)
{
        rcu_assign_pointer(q->oper_sched, *admin);
        rcu_assign_pointer(q->admin_sched, NULL);

        if (*oper)
                call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

        *oper = *admin;
        *admin = NULL;
}

/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
        ktime_t time_since_sched_start;
        s32 time_elapsed;

        time_since_sched_start = ktime_sub(time, sched->base_time);
        div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

        return time_elapsed;
}

static ktime_t get_interval_end_time(struct sched_gate_list *sched,
                                     struct sched_gate_list *admin,
                                     struct sched_entry *entry,
                                     ktime_t intv_start)
{
        s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
        ktime_t intv_end, cycle_ext_end, cycle_end;

        cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
        intv_end = ktime_add_ns(intv_start, entry->interval);
        cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

        if (ktime_before(intv_end, cycle_end))
                return intv_end;
        else if (admin && admin != sched &&
                 ktime_after(admin->base_time, cycle_end) &&
                 ktime_before(admin->base_time, cycle_ext_end))
                return admin->base_time;
        else
                return cycle_end;
}

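/* Convert a frame length in bytes to its transmission duration in
 * nanoseconds at the current link speed.
 */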
static int length_to_duration(struct taprio_sched *q, int len)
{
        return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}

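/* Inverse of length_to_duration(): convert a duration in nanoseconds to
 * the number of bytes transmittable in that time at the current link
 * speed.
 */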
static int duration_to_length(struct taprio_sched *q, u64 duration)
{
        return div_u64(duration * PSEC_PER_NSEC, atomic64_read(&q->picos_per_byte));
}

/* Sets sched->max_sdu[] and sched->max_frm_len[] to the minimum between the
 * q->max_sdu[] requested by the user and the max_sdu dynamically determined by
 * the maximum open gate durations at the given link speed.
 */
static void taprio_update_queue_max_sdu(struct taprio_sched *q,
                                        struct sched_gate_list *sched,
                                        struct qdisc_size_table *stab)
{
        struct net_device *dev = qdisc_dev(q->root);
        int num_tc = netdev_get_num_tc(dev);
        u32 max_sdu_from_user;
        u32 max_sdu_dynamic;
        u32 max_sdu;
        int tc;

        for (tc = 0; tc < num_tc; tc++) {
                max_sdu_from_user = q->max_sdu[tc] ?: U32_MAX;

                /* TC gate never closes => keep the queueMaxSDU
                 * selected by the user
                 */
                if (sched->max_open_gate_duration[tc] == sched->cycle_time) {
                        max_sdu_dynamic = U32_MAX;
                } else {
                        u32 max_frm_len;

                        max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]);
                        /* Compensate for L1 overhead from size table,
                         * but don't let the frame size go negative
                         */
                        if (stab) {
                                max_frm_len -= stab->szopts.overhead;
                                max_frm_len = max_t(int, max_frm_len,
                                                    dev->hard_header_len + 1);
                        }
                        max_sdu_dynamic = max_frm_len - dev->hard_header_len;
                        if (max_sdu_dynamic > dev->max_mtu)
                                max_sdu_dynamic = U32_MAX;
                }

                max_sdu = min(max_sdu_dynamic, max_sdu_from_user);

                if (max_sdu != U32_MAX) {
                        sched->max_frm_len[tc] = max_sdu + dev->hard_header_len;
                        sched->max_sdu[tc] = max_sdu;
                } else {
                        sched->max_frm_len[tc] = U32_MAX; /* never oversized */
                        sched->max_sdu[tc] = 0;
                }
        }
}

/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
                                                  struct Qdisc *sch,
                                                  struct sched_gate_list *sched,
                                                  struct sched_gate_list *admin,
                                                  ktime_t time,
                                                  ktime_t *interval_start,
                                                  ktime_t *interval_end,
                                                  bool validate_interval)
{
        ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
        ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
        struct sched_entry *entry = NULL, *entry_found = NULL;
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        bool entry_available = false;
        s32 cycle_elapsed;
        int tc, n;

        tc = netdev_get_prio_tc_map(dev, skb->priority);
        packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

        *interval_start = 0;
        *interval_end = 0;

        if (!sched)
                return NULL;

        cycle = sched->cycle_time;
        cycle_elapsed = get_cycle_time_elapsed(sched, time);
        curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
        cycle_end = ktime_add_ns(curr_intv_end, cycle);

        list_for_each_entry(entry, &sched->entries, list) {
                curr_intv_start = curr_intv_end;
                curr_intv_end = get_interval_end_time(sched, admin, entry,
                                                      curr_intv_start);

                if (ktime_after(curr_intv_start, cycle_end))
                        break;

                if (!(entry->gate_mask & BIT(tc)) ||
                    packet_transmit_time > entry->interval)
                        continue;

                txtime = entry->next_txtime;

                if (ktime_before(txtime, time) || validate_interval) {
                        transmit_end_time = ktime_add_ns(time, packet_transmit_time);
                        if ((ktime_before(curr_intv_start, time) &&
                             ktime_before(transmit_end_time, curr_intv_end)) ||
                            (ktime_after(curr_intv_start, time) && !validate_interval)) {
                                entry_found = entry;
                                *interval_start = curr_intv_start;
                                *interval_end = curr_intv_end;
                                break;
                        } else if (!entry_available && !validate_interval) {
                                /* Here, we are just trying to find out the
                                 * first available interval in the next cycle.
                                 */
                                entry_available = true;
                                entry_found = entry;
                                *interval_start = ktime_add_ns(curr_intv_start, cycle);
                                *interval_end = ktime_add_ns(curr_intv_end, cycle);
                        }
                } else if (ktime_before(txtime, earliest_txtime) &&
                           !entry_available) {
                        earliest_txtime = txtime;
                        entry_found = entry;
                        n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
                        *interval_start = ktime_add(curr_intv_start, n * cycle);
                        *interval_end = ktime_add(curr_intv_end, n * cycle);
                }
        }

        return entry_found;
}

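/* Check whether skb->tstamp falls within an interval where the gate for
 * the skb's traffic class is open. Used for packets from sockets with
 * SO_TXTIME set.
 */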
static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct sched_gate_list *sched, *admin;
        ktime_t interval_start, interval_end;
        struct sched_entry *entry;

        rcu_read_lock();
        sched = rcu_dereference(q->oper_sched);
        admin = rcu_dereference(q->admin_sched);

        entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
                                       &interval_start, &interval_end, true);
        rcu_read_unlock();

        return entry;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
        unsigned int offset = skb_network_offset(skb);
        const struct ipv6hdr *ipv6h;
        const struct iphdr *iph;
        struct ipv6hdr _ipv6h;

        ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
        if (!ipv6h)
                return 0;

        if (ipv6h->version == 4) {
                iph = (struct iphdr *)ipv6h;
                offset += iph->ihl * 4;

                /* special-case 6in4 tunnelling, as that is a common way to get
                 * v6 connectivity in the home
                 */
                if (iph->protocol == IPPROTO_IPV6) {
                        ipv6h = skb_header_pointer(skb, offset,
                                                   sizeof(_ipv6h), &_ipv6h);

                        if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
                                return 0;
                } else if (iph->protocol != IPPROTO_TCP) {
                        return 0;
                }
        } else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
                return 0;
        }

        return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and the packet
 *       can be transmitted before it closes, schedule the packet right
 *       away.
 *    b. If the gate corresponding to the traffic class is going to open
 *       later in the cycle, set the txtime of the packet to the interval
 *       start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
        ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
        struct taprio_sched *q = qdisc_priv(sch);
        struct sched_gate_list *sched, *admin;
        ktime_t minimum_time, now, txtime;
        int len, packet_transmit_time;
        struct sched_entry *entry;
        bool sched_changed;

        now = taprio_get_time(q);
        minimum_time = ktime_add_ns(now, q->txtime_delay);

        tcp_tstamp = get_tcp_tstamp(q, skb);
        minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

        rcu_read_lock();
        admin = rcu_dereference(q->admin_sched);
        sched = rcu_dereference(q->oper_sched);
        if (admin && ktime_after(minimum_time, admin->base_time))
                switch_schedules(q, &admin, &sched);

        /* Until the schedule starts, all the queues are open */
        if (!sched || ktime_before(minimum_time, sched->base_time)) {
                txtime = minimum_time;
                goto done;
        }

        len = qdisc_pkt_len(skb);
        packet_transmit_time = length_to_duration(q, len);

        do {
                sched_changed = false;

                entry = find_entry_to_transmit(skb, sch, sched, admin,
                                               minimum_time,
                                               &interval_start, &interval_end,
                                               false);
                if (!entry) {
                        txtime = 0;
                        goto done;
                }

                txtime = entry->next_txtime;
                txtime = max_t(ktime_t, txtime, minimum_time);
                txtime = max_t(ktime_t, txtime, interval_start);

                if (admin && admin != sched &&
                    ktime_after(txtime, admin->base_time)) {
                        sched = admin;
                        sched_changed = true;
                        continue;
                }

                transmit_end_time = ktime_add(txtime, packet_transmit_time);
                minimum_time = transmit_end_time;

                /* Update the txtime of the current entry to the next time its
                 * interval starts.
                 */
                if (ktime_after(transmit_end_time, interval_end))
                        entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
        } while (sched_changed || ktime_after(transmit_end_time, interval_end));

        entry->next_txtime = transmit_end_time;

done:
        rcu_read_unlock();
        return txtime;
}

/* Devices with full offload are expected to honor this in hardware */
static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch,
                                             struct sk_buff *skb)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct sched_gate_list *sched;
        int prio = skb->priority;
        bool exceeds = false;
        u8 tc;

        tc = netdev_get_prio_tc_map(dev, prio);

        rcu_read_lock();
        sched = rcu_dereference(q->oper_sched);
        if (sched && skb->len > sched->max_frm_len[tc])
                exceeds = true;
        rcu_read_unlock();

        return exceeds;
}

static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
                              struct Qdisc *child, struct sk_buff **to_free)
{
        struct taprio_sched *q = qdisc_priv(sch);

        /* sk_flags are only safe to use on full sockets. */
        if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
                if (!is_valid_interval(skb, sch))
                        return qdisc_drop(skb, sch, to_free);
        } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                skb->tstamp = get_packet_txtime(skb, sch);
                if (!skb->tstamp)
                        return qdisc_drop(skb, sch, to_free);
        }

        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;

        return qdisc_enqueue(skb, child, to_free);
}

static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
                                    struct Qdisc *child,
                                    struct sk_buff **to_free)
{
        unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
        netdev_features_t features = netif_skb_features(skb);
        struct sk_buff *segs, *nskb;
        int ret;

        segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
        if (IS_ERR_OR_NULL(segs))
                return qdisc_drop(skb, sch, to_free);

        skb_list_walk_safe(segs, segs, nskb) {
                skb_mark_not_on_list(segs);
                qdisc_skb_cb(segs)->pkt_len = segs->len;
                slen += segs->len;

                /* FIXME: we should be segmenting to a smaller size
                 * rather than dropping these
                 */
                if (taprio_skb_exceeds_queue_max_sdu(sch, segs))
                        ret = qdisc_drop(segs, sch, to_free);
                else
                        ret = taprio_enqueue_one(segs, sch, child, to_free);

                if (ret != NET_XMIT_SUCCESS) {
                        if (net_xmit_drop_count(ret))
                                qdisc_qstats_drop(sch);
                } else {
                        numsegs++;
                }
        }

        if (numsegs > 1)
                qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
        consume_skb(skb);

        return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                          struct sk_buff **to_free)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct Qdisc *child;
        int queue;

        queue = skb_get_queue_mapping(skb);

        child = q->qdiscs[queue];
        if (unlikely(!child))
                return qdisc_drop(skb, sch, to_free);

        if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) {
                /* Large packets might not be transmitted when the transmission
                 * duration exceeds any configured interval. Therefore, segment
                 * the skb into smaller chunks. Drivers with full offload are
                 * expected to handle this in hardware.
                 */
                if (skb_is_gso(skb))
                        return taprio_enqueue_segmented(skb, sch, child,
                                                        to_free);

                return qdisc_drop(skb, sch, to_free);
        }

        return taprio_enqueue_one(skb, sch, child, to_free);
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
        WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented");
        return NULL;
}

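/* Initialize the per-TC byte budgets of a schedule entry from its gate
 * durations and the current link speed.
 */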
static void taprio_set_budgets(struct taprio_sched *q,
                               struct sched_gate_list *sched,
                               struct sched_entry *entry)
{
        struct net_device *dev = qdisc_dev(q->root);
        int num_tc = netdev_get_num_tc(dev);
        int tc, budget;

        for (tc = 0; tc < num_tc; tc++) {
                /* Traffic classes which never close have infinite budget */
                if (entry->gate_duration[tc] == sched->cycle_time)
                        budget = INT_MAX;
                else
                        budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC,
                                           atomic64_read(&q->picos_per_byte));

                atomic_set(&entry->budget[tc], budget);
        }
}

/* When an skb is sent, it consumes from the budget of all traffic classes */
static int taprio_update_budgets(struct sched_entry *entry, size_t len,
                                 int tc_consumed, int num_tc)
{
        int tc, budget, new_budget = 0;

        for (tc = 0; tc < num_tc; tc++) {
                budget = atomic_read(&entry->budget[tc]);
                /* Don't consume from infinite budget */
                if (budget == INT_MAX) {
                        if (tc == tc_consumed)
                                new_budget = budget;
                        continue;
                }

                if (tc == tc_consumed)
                        new_budget = atomic_sub_return(len, &entry->budget[tc]);
                else
                        atomic_sub(len, &entry->budget[tc]);
        }

        return new_budget;
}

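/* Try to dequeue one skb from the child qdisc of the given TXQ. Unless
 * txtime assist is in use, first peek at the head packet and enforce the
 * gate state, the guard band and the remaining budget for its traffic
 * class.
 */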
static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq,
                                               struct sched_entry *entry,
                                               u32 gate_mask)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct Qdisc *child = q->qdiscs[txq];
        int num_tc = netdev_get_num_tc(dev);
        struct sk_buff *skb;
        ktime_t guard;
        int prio;
        int len;
        u8 tc;

        if (unlikely(!child))
                return NULL;

        if (TXTIME_ASSIST_IS_ENABLED(q->flags))
                goto skip_peek_checks;

        skb = child->ops->peek(child);
        if (!skb)
                return NULL;

        prio = skb->priority;
        tc = netdev_get_prio_tc_map(dev, prio);

        if (!(gate_mask & BIT(tc)))
                return NULL;

        len = qdisc_pkt_len(skb);
        guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len));

        /* In the case that there's no gate entry, there's no
         * guard band ...
         */
        if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
            !taprio_entry_allows_tx(guard, entry, tc))
                return NULL;

        /* ... and no budget. */
        if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
            taprio_update_budgets(entry, len, tc, num_tc) < 0)
                return NULL;

skip_peek_checks:
        skb = child->ops->dequeue(child);
        if (unlikely(!skb))
                return NULL;

        qdisc_bstats_update(sch, skb);
        qdisc_qstats_backlog_dec(sch, skb);
        sch->q.qlen--;

        return skb;
}

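/* Advance *txq to the next TX queue of the given traffic class, wrapping
 * around within the [offset, offset + count) range of that class.
 */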
static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq)
{
        int offset = dev->tc_to_txq[tc].offset;
        int count = dev->tc_to_txq[tc].count;

        (*txq)++;
        if (*txq == offset + count)
                *txq = offset;
}

/* Prioritize higher traffic classes, and select among TXQs belonging to the
 * same TC using round robin
 */
static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
                                                  struct sched_entry *entry,
                                                  u32 gate_mask)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int num_tc = netdev_get_num_tc(dev);
        struct sk_buff *skb;
        int tc;

        for (tc = num_tc - 1; tc >= 0; tc--) {
                int first_txq = q->cur_txq[tc];

                if (!(gate_mask & BIT(tc)))
                        continue;

                do {
                        skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc],
                                                      entry, gate_mask);

                        taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);

                        if (q->cur_txq[tc] >= dev->num_tx_queues)
                                q->cur_txq[tc] = first_txq;

                        if (skb)
                                return skb;
                } while (q->cur_txq[tc] != first_txq);
        }

        return NULL;
}

/* Broken way of prioritizing smaller TXQ indices and ignoring the traffic
 * class other than to determine whether the gate is open or not
 */
static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch,
                                                   struct sched_entry *entry,
                                                   u32 gate_mask)
{
        struct net_device *dev = qdisc_dev(sch);
        struct sk_buff *skb;
        int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                skb = taprio_dequeue_from_txq(sch, i, entry, gate_mask);
                if (skb)
                        return skb;
        }

        return NULL;
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct sk_buff *skb = NULL;
        struct sched_entry *entry;
        u32 gate_mask;

        rcu_read_lock();
        entry = rcu_dereference(q->current_entry);
        /* If there's no entry, it means that the schedule didn't
         * start yet, so force all gates to be open. This is in
         * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
         * "AdminGateStates".
         */
        gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
        if (!gate_mask)
                goto done;

        if (static_branch_unlikely(&taprio_have_broken_mqprio) &&
            !static_branch_likely(&taprio_have_working_mqprio)) {
                /* Single NIC kind which is broken */
                skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
        } else if (static_branch_likely(&taprio_have_working_mqprio) &&
                   !static_branch_unlikely(&taprio_have_broken_mqprio)) {
                /* Single NIC kind which prioritizes properly */
                skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
        } else {
                /* Mixed NIC kinds present in system, need dynamic testing */
                if (q->broken_mqprio)
                        skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
                else
                        skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
        }

done:
        rcu_read_unlock();

        return skb;
}

static bool should_restart_cycle(const struct sched_gate_list *oper,
                                 const struct sched_entry *entry)
{
        if (list_is_last(&entry->list, &oper->entries))
                return true;

        if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0)
                return true;

        return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
                                    const struct sched_gate_list *oper,
                                    ktime_t end_time)
{
        ktime_t next_base_time, extension_time;

        if (!admin)
                return false;

        next_base_time = sched_base_time(admin);

        /* This is the simple case, the end_time would fall after
         * the next schedule base_time.
         */
        if (ktime_compare(next_base_time, end_time) <= 0)
                return true;

        /* This is the cycle_time_extension case, if the end_time
         * plus the amount that can be extended would fall after the
         * next schedule base_time, we can extend the current schedule
         * for that amount.
         */
        extension_time = ktime_add_ns(end_time, oper->cycle_time_extension);

        /* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
         * how precisely the extension should be made. So after
         * conformance testing, this logic may change.
         */
        if (ktime_compare(next_base_time, extension_time) <= 0)
                return true;

        return false;
}

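/* hrtimer callback that advances the schedule to the next gate control
 * list entry: it computes that entry's end time and per-TC gate close
 * times, switches to the admin schedule when due, and rearms itself for
 * the next transition.
 */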
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
        struct taprio_sched *q = container_of(timer, struct taprio_sched,
                                              advance_timer);
        struct net_device *dev = qdisc_dev(q->root);
        struct sched_gate_list *oper, *admin;
        int num_tc = netdev_get_num_tc(dev);
        struct sched_entry *entry, *next;
        struct Qdisc *sch = q->root;
        ktime_t end_time;
        int tc;

        spin_lock(&q->current_entry_lock);
        entry = rcu_dereference_protected(q->current_entry,
                                          lockdep_is_held(&q->current_entry_lock));
        oper = rcu_dereference_protected(q->oper_sched,
                                         lockdep_is_held(&q->current_entry_lock));
        admin = rcu_dereference_protected(q->admin_sched,
                                          lockdep_is_held(&q->current_entry_lock));

        if (!oper)
                switch_schedules(q, &admin, &oper);

        /* This can happen in two cases: 1. this is the very first run
         * of this function (i.e. we weren't running any schedule
         * previously); 2. the previous schedule just ended. The first
         * entry of each schedule is pre-calculated during schedule
         * initialization.
         */
        if (unlikely(!entry || entry->end_time == oper->base_time)) {
                next = list_first_entry(&oper->entries, struct sched_entry,
                                        list);
                end_time = next->end_time;
                goto first_run;
        }

        if (should_restart_cycle(oper, entry)) {
                next = list_first_entry(&oper->entries, struct sched_entry,
                                        list);
                oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time,
                                                    oper->cycle_time);
        } else {
                next = list_next_entry(entry, list);
        }

        end_time = ktime_add_ns(entry->end_time, next->interval);
        end_time = min_t(ktime_t, end_time, oper->cycle_end_time);

        for (tc = 0; tc < num_tc; tc++) {
                if (next->gate_duration[tc] == oper->cycle_time)
                        next->gate_close_time[tc] = KTIME_MAX;
                else
                        next->gate_close_time[tc] = ktime_add_ns(entry->end_time,
                                                                 next->gate_duration[tc]);
        }

        if (should_change_schedules(admin, oper, end_time)) {
                /* Set things so the next time this runs, the new
                 * schedule runs.
                 */
                end_time = sched_base_time(admin);
                switch_schedules(q, &admin, &oper);
        }

        next->end_time = end_time;
        taprio_set_budgets(q, oper, next);

first_run:
        rcu_assign_pointer(q->current_entry, next);
        spin_unlock(&q->current_entry_lock);

        hrtimer_set_expires(&q->advance_timer, end_time);

        rcu_read_lock();
        __netif_schedule(sch);
        rcu_read_unlock();

        return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
        [TCA_TAPRIO_SCHED_ENTRY_INDEX]     = { .type = NLA_U32 },
        [TCA_TAPRIO_SCHED_ENTRY_CMD]       = { .type = NLA_U8 },
        [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
        [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
        [TCA_TAPRIO_TC_ENTRY_INDEX]        = { .type = NLA_U32 },
        [TCA_TAPRIO_TC_ENTRY_MAX_SDU]      = { .type = NLA_U32 },
        [TCA_TAPRIO_TC_ENTRY_FP]           = NLA_POLICY_RANGE(NLA_U32,
                                                              TC_FP_EXPRESS,
                                                              TC_FP_PREEMPTIBLE),
};

static const struct netlink_range_validation_signed taprio_cycle_time_range = {
        .min = 0,
        .max = INT_MAX,
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
        [TCA_TAPRIO_ATTR_PRIOMAP]              = {
                .len = sizeof(struct tc_mqprio_qopt)
        },
        [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
        [TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
        [TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           =
                NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_FLAGS]                      =
                NLA_POLICY_MASK(NLA_U32, TAPRIO_SUPPORTED_FLAGS),
        [TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
        [TCA_TAPRIO_ATTR_TC_ENTRY]                   = { .type = NLA_NESTED },
};

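/* Fill one sched_entry from its netlink attributes, rejecting intervals
 * too short to transmit a minimum-sized Ethernet frame at the current
 * link speed.
 */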
static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
                            struct sched_entry *entry,
                            struct netlink_ext_ack *extack)
{
        int min_duration = length_to_duration(q, ETH_ZLEN);
        u32 interval = 0;

        if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
                entry->command = nla_get_u8(
                        tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

        if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
                entry->gate_mask = nla_get_u32(
                        tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

        if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
                interval = nla_get_u32(
                        tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

        /* The interval should allow at least the minimum ethernet
         * frame to go out.
         */
        if (interval < min_duration) {
                NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
                return -EINVAL;
        }

        entry->interval = interval;

        return 0;
}

static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
                             struct sched_entry *entry, int index,
                             struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
        int err;

        err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
                                          entry_policy, NULL);
        if (err < 0) {
                NL_SET_ERR_MSG(extack, "Could not parse nested entry");
                return -EINVAL;
        }

        entry->index = index;

        return fill_sched_entry(q, tb, entry, extack);
}

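/* Parse the nested list of schedule entries into 'sched'. Returns the
 * number of entries parsed, or a negative error code.
 */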
static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
                            struct sched_gate_list *sched,
                            struct netlink_ext_ack *extack)
{
        struct nlattr *n;
        int err, rem;
        int i = 0;

        if (!list)
                return -EINVAL;

        nla_for_each_nested(n, list, rem) {
                struct sched_entry *entry;

                if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
                        NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
                        continue;
                }

                entry = kzalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry) {
                        NL_SET_ERR_MSG(extack, "Not enough memory for entry");
                        return -ENOMEM;
                }

                err = parse_sched_entry(q, n, entry, i, extack);
                if (err < 0) {
                        kfree(entry);
                        return err;
                }

                list_add_tail(&entry->list, &sched->entries);
                i++;
        }

        sched->num_entries = i;

        return i;
}

static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
                                 struct sched_gate_list *new,
                                 struct netlink_ext_ack *extack)
{
        int err = 0;

        if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
                NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
                return -ENOTSUPP;
        }

        if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
                new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
                new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
                new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

        if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
                err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
                                       new, extack);
        if (err < 0)
                return err;

        if (!new->cycle_time) {
                struct sched_entry *entry;
                ktime_t cycle = 0;

                list_for_each_entry(entry, &new->entries, list)
                        cycle = ktime_add_ns(cycle, entry->interval);

                if (!cycle) {
                        NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
                        return -EINVAL;
                }

                if (cycle < 0 || cycle > INT_MAX) {
                        NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
                        return -EINVAL;
                }

                new->cycle_time = cycle;
        }

        taprio_calculate_gate_durations(q, new);

        return 0;
}

static int taprio_parse_mqprio_opt(struct net_device *dev,
                                   struct tc_mqprio_qopt *qopt,
                                   struct netlink_ext_ack *extack,
                                   u32 taprio_flags)
{
        bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);

        if (!qopt && !dev->num_tc) {
                NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
                return -EINVAL;
        }

        /* If num_tc is already set, it means that the user already
         * configured the mqprio part
         */
        if (dev->num_tc)
                return 0;

        /* taprio imposes that traffic classes map 1:n to tx queues */
        if (qopt->num_tc > dev->num_tx_queues) {
                NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
                return -EINVAL;
        }

        /* For some reason, in txtime-assist mode, we allow TXQ ranges for
         * different TCs to overlap, and just validate the TXQ ranges.
         */
        return mqprio_validate_qopt(dev, qopt, true, allow_overlapping_txqs,
                                    extack);
}

static int taprio_get_start_time(struct Qdisc *sch,
                                 struct sched_gate_list *sched,
                                 ktime_t *start)
{
        struct taprio_sched *q = qdisc_priv(sch);
        ktime_t now, base, cycle;
        s64 n;

        base = sched_base_time(sched);
        now = taprio_get_time(q);

        if (ktime_after(base, now)) {
                *start = base;
                return 0;
        }

        cycle = sched->cycle_time;

        /* The qdisc is expected to have at least one sched_entry.  Moreover,
         * any entry must have 'interval' > 0. Thus if the cycle time is zero,
         * something went really wrong. In that case, we should warn about this
         * inconsistent state and return an error.
         */
        if (WARN_ON(!cycle))
                return -EFAULT;

        /* Schedule the start time for the beginning of the next
         * cycle.
         */
        n = div64_s64(ktime_sub_ns(now, base), cycle);
        *start = ktime_add_ns(base, (n + 1) * cycle);
        return 0;
}

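/* Program the end time, budgets and per-TC gate close times of the first
 * schedule entry, relative to the schedule's start time 'base'.
 */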
static void setup_first_end_time(struct taprio_sched *q,
                                 struct sched_gate_list *sched, ktime_t base)
{
        struct net_device *dev = qdisc_dev(q->root);
        int num_tc = netdev_get_num_tc(dev);
        struct sched_entry *first;
        ktime_t cycle;
        int tc;

        first = list_first_entry(&sched->entries,
                                 struct sched_entry, list);

        cycle = sched->cycle_time;

        /* FIXME: find a better place to do this */
        sched->cycle_end_time = ktime_add_ns(base, cycle);

        first->end_time = ktime_add_ns(base, first->interval);
        taprio_set_budgets(q, sched, first);

        for (tc = 0; tc < num_tc; tc++) {
                if (first->gate_duration[tc] == sched->cycle_time)
                        first->gate_close_time[tc] = KTIME_MAX;
                else
                        first->gate_close_time[tc] = ktime_add_ns(base, first->gate_duration[tc]);
        }

        rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
                               ktime_t start, struct sched_gate_list *new)
{
        struct taprio_sched *q = qdisc_priv(sch);
        ktime_t expires;

        if (FULL_OFFLOAD_IS_ENABLED(q->flags))
                return;

        expires = hrtimer_get_expires(&q->advance_timer);
        if (expires == 0)
                expires = KTIME_MAX;

        /* If the new schedule starts before the next expiration, we
         * reprogram it to the earliest one, so we change the admin
         * schedule to the operational one at the right time.
         */
        start = min_t(ktime_t, start, expires);

        hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

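/* Cache the transmission time per byte, in picoseconds, derived from the
 * link speed reported by ethtool (falling back to 10 Mbit/s when the
 * speed is unknown).
 */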
static void taprio_set_picos_per_byte(struct net_device *dev,
                                      struct taprio_sched *q)
{
        struct ethtool_link_ksettings ecmd;
        int speed = SPEED_10;
        int picos_per_byte;
        int err;

        err = __ethtool_get_link_ksettings(dev, &ecmd);
        if (err < 0)
                goto skip;

        if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
                speed = ecmd.base.speed;

skip:
        picos_per_byte = (USEC_PER_SEC * 8) / speed;

        atomic64_set(&q->picos_per_byte, picos_per_byte);
        netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
                   dev->name, (long long)atomic64_read(&q->picos_per_byte),
                   ecmd.base.speed);
}

static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
                               void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct sched_gate_list *oper, *admin;
        struct qdisc_size_table *stab;
        struct taprio_sched *q;

        ASSERT_RTNL();

        if (event != NETDEV_UP && event != NETDEV_CHANGE)
                return NOTIFY_DONE;

        list_for_each_entry(q, &taprio_list, taprio_list) {
                if (dev != qdisc_dev(q->root))
                        continue;

                taprio_set_picos_per_byte(dev, q);

                stab = rtnl_dereference(q->root->stab);

                oper = rtnl_dereference(q->oper_sched);
                if (oper)
                        taprio_update_queue_max_sdu(q, oper, stab);

                admin = rtnl_dereference(q->admin_sched);
                if (admin)
                        taprio_update_queue_max_sdu(q, admin, stab);

                break;
        }

        return NOTIFY_DONE;
}

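/* Initialize next_txtime of every entry to its first occurrence, i.e. the
 * entry's offset from the schedule's base time.
 */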
static void setup_txtime(struct taprio_sched *q,
                         struct sched_gate_list *sched, ktime_t base)
{
        struct sched_entry *entry;
        u64 interval = 0;

        list_for_each_entry(entry, &sched->entries, list) {
                entry->next_txtime = ktime_add_ns(base, interval);
                interval += entry->interval;
        }
}

static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
        struct __tc_taprio_qopt_offload *__offload;

        __offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
                            GFP_KERNEL);
        if (!__offload)
                return NULL;

        refcount_set(&__offload->users, 1);

        return &__offload->offload;
}

struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
                                                  *offload)
{
        struct __tc_taprio_qopt_offload *__offload;

        __offload = container_of(offload, struct __tc_taprio_qopt_offload,
                                 offload);

        refcount_inc(&__offload->users);

        return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
        struct __tc_taprio_qopt_offload *__offload;

        __offload = container_of(offload, struct __tc_taprio_qopt_offload,
                                 offload);

        if (!refcount_dec_and_test(&__offload->users))
                return;

        kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);

/* This function only serves to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so that when dump() is
 * called the user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain.  But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules() at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
        struct sched_gate_list *oper, *admin;

        oper = rtnl_dereference(q->oper_sched);
        admin = rtnl_dereference(q->admin_sched);

        switch_schedules(q, &admin, &oper);
}

static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{
        u32 i, queue_mask = 0;

        for (i = 0; i < dev->num_tc; i++) {
                u32 offset, count;

                if (!(tc_mask & BIT(i)))
                        continue;

                offset = dev->tc_to_txq[i].offset;
                count = dev->tc_to_txq[i].count;

                queue_mask |= GENMASK(offset + count - 1, offset);
        }

        return queue_mask;
}
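
/* Worked example (illustrative): with num_tc = 3 and tc_to_txq = 1@0 1@1 2@2,
 * a tc_mask of 0b101 (TCs 0 and 2) selects queues 0, 2 and 3, so the
 * returned queue_mask is 0b1101.
 */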

static void taprio_sched_to_offload(struct net_device *dev,
                                    struct sched_gate_list *sched,
                                    struct tc_taprio_qopt_offload *offload,
                                    const struct tc_taprio_caps *caps)
{
        struct sched_entry *entry;
        int i = 0;

        offload->base_time = sched->base_time;
        offload->cycle_time = sched->cycle_time;
        offload->cycle_time_extension = sched->cycle_time_extension;

        list_for_each_entry(entry, &sched->entries, list) {
                struct tc_taprio_sched_entry *e = &offload->entries[i];

                e->command = entry->command;
                e->interval = entry->interval;
                if (caps->gate_mask_per_txq)
                        e->gate_mask = tc_map_to_queue_mask(dev,
                                                            entry->gate_mask);
                else
                        e->gate_mask = entry->gate_mask;

                i++;
        }

        offload->num_entries = i;
}
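
/* Note: depending on caps->gate_mask_per_txq, drivers receive the gate masks
 * either per TX queue (translated through tc_map_to_queue_mask() above) or
 * in the original per-traffic-class format, matching what their hardware
 * expects.
 */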

static void taprio_detect_broken_mqprio(struct taprio_sched *q)
{
        struct net_device *dev = qdisc_dev(q->root);
        struct tc_taprio_caps caps;

        qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
                                 &caps, sizeof(caps));

        q->broken_mqprio = caps.broken_mqprio;
        if (q->broken_mqprio)
                static_branch_inc(&taprio_have_broken_mqprio);
        else
                static_branch_inc(&taprio_have_working_mqprio);

        q->detected_mqprio = true;
}

static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
{
        if (!q->detected_mqprio)
                return;

        if (q->broken_mqprio)
                static_branch_dec(&taprio_have_broken_mqprio);
        else
                static_branch_dec(&taprio_have_working_mqprio);
}
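
/* The static branches flipped above are presumably consulted on the packet
 * fast path elsewhere in this file; static keys make the broken-vs-working
 * mqprio distinction cost no per-packet conditional once patched in.
 */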

static int taprio_enable_offload(struct net_device *dev,
                                 struct taprio_sched *q,
                                 struct sched_gate_list *sched,
                                 struct netlink_ext_ack *extack)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        struct tc_taprio_qopt_offload *offload;
        struct tc_taprio_caps caps;
        int tc, err = 0;

        if (!ops->ndo_setup_tc) {
                NL_SET_ERR_MSG(extack,
                               "Device does not support taprio offload");
                return -EOPNOTSUPP;
        }

        qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
                                 &caps, sizeof(caps));

        if (!caps.supports_queue_max_sdu) {
                for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
                        if (q->max_sdu[tc]) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Device does not handle queueMaxSDU");
                                return -EOPNOTSUPP;
                        }
                }
        }

        offload = taprio_offload_alloc(sched->num_entries);
        if (!offload) {
                NL_SET_ERR_MSG(extack,
                               "Not enough memory for enabling offload mode");
                return -ENOMEM;
        }
        offload->cmd = TAPRIO_CMD_REPLACE;
        offload->extack = extack;
        mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt);
        offload->mqprio.extack = extack;
        taprio_sched_to_offload(dev, sched, offload, &caps);
        mqprio_fp_to_offload(q->fp, &offload->mqprio);

        for (tc = 0; tc < TC_MAX_QUEUE; tc++)
                offload->max_sdu[tc] = q->max_sdu[tc];

        err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
        if (err < 0) {
                NL_SET_ERR_MSG_WEAK(extack,
                                    "Device failed to setup taprio offload");
                goto done;
        }

        q->offloaded = true;

done:
        /* The offload structure may linger around via a reference taken by the
         * device driver, so clear the netlink extack pointers so that the
         * driver isn't tempted to dereference data which has stopped being
         * valid
         */
        offload->extack = NULL;
        offload->mqprio.extack = NULL;
        taprio_offload_free(offload);

        return err;
}

static int taprio_disable_offload(struct net_device *dev,
                                  struct taprio_sched *q,
                                  struct netlink_ext_ack *extack)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        struct tc_taprio_qopt_offload *offload;
        int err;

        if (!q->offloaded)
                return 0;

        offload = taprio_offload_alloc(0);
        if (!offload) {
                NL_SET_ERR_MSG(extack,
                               "Not enough memory to disable offload mode");
                return -ENOMEM;
        }
        offload->cmd = TAPRIO_CMD_DESTROY;

        err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
        if (err < 0) {
                NL_SET_ERR_MSG(extack,
                               "Device failed to disable offload");
                goto out;
        }

        q->offloaded = false;

out:
        taprio_offload_free(offload);

        return err;
}

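/* Hedged driver-side sketch (illustrative only): a driver implementing
 * taprio offload dispatches on offload->cmd from its ndo_setup_tc() hook.
 * All "foo_*" names are hypothetical:
 *
 *      static int foo_setup_tc(struct net_device *dev,
 *                              enum tc_setup_type type, void *type_data)
 *      {
 *              struct tc_taprio_qopt_offload *offload = type_data;
 *
 *              if (type != TC_SETUP_QDISC_TAPRIO)
 *                      return -EOPNOTSUPP;
 *
 *              switch (offload->cmd) {
 *              case TAPRIO_CMD_REPLACE:
 *                      return foo_taprio_replace(dev, offload);
 *              case TAPRIO_CMD_DESTROY:
 *                      return foo_taprio_destroy(dev);
 *              default:
 *                      return -EOPNOTSUPP;
 *              }
 *      }
 */
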
/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule, and is hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
                                struct netlink_ext_ack *extack)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int err = -EINVAL;

        if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                const struct ethtool_ops *ops = dev->ethtool_ops;
                struct ethtool_ts_info info = {
                        .cmd = ETHTOOL_GET_TS_INFO,
                        .phc_index = -1,
                };

                if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
                        NL_SET_ERR_MSG(extack,
                                       "The 'clockid' cannot be specified for full offload");
                        goto out;
                }

                if (ops && ops->get_ts_info)
                        err = ops->get_ts_info(dev, &info);

                if (err || info.phc_index < 0) {
                        NL_SET_ERR_MSG(extack,
                                       "Device does not have a PTP clock");
                        err = -ENOTSUPP;
                        goto out;
                }
        } else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
                int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
                enum tk_offsets tk_offset;

                /* We only support static clockids, and we don't allow
                 * them to be modified after the first init.
                 */
                if (clockid < 0 ||
                    (q->clockid != -1 && q->clockid != clockid)) {
                        NL_SET_ERR_MSG(extack,
                                       "Changing the 'clockid' of a running schedule is not supported");
                        err = -ENOTSUPP;
                        goto out;
                }

                switch (clockid) {
                case CLOCK_REALTIME:
                        tk_offset = TK_OFFS_REAL;
                        break;
                case CLOCK_MONOTONIC:
                        tk_offset = TK_OFFS_MAX;
                        break;
                case CLOCK_BOOTTIME:
                        tk_offset = TK_OFFS_BOOT;
                        break;
                case CLOCK_TAI:
                        tk_offset = TK_OFFS_TAI;
                        break;
                default:
                        NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
                        err = -EINVAL;
                        goto out;
                }
                /* This pairs with READ_ONCE() in taprio_mono_to_any */
                WRITE_ONCE(q->tk_offset, tk_offset);

                q->clockid = clockid;
        } else {
                NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
                goto out;
        }

        /* Everything went ok, return success. */
        err = 0;

out:
        return err;
}

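/* Illustrative tc(8) invocation (assumed iproute2 syntax) creating a purely
 * software schedule driven by CLOCK_TAI, which the code above requires to be
 * specified whenever full offload is not in use:
 *
 *      tc qdisc replace dev eth0 parent root handle 100 taprio \
 *              num_tc 3 \
 *              map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *              queues 1@0 1@1 2@2 \
 *              base-time 1528743495910289987 \
 *              sched-entry S 01 300000 \
 *              sched-entry S 02 300000 \
 *              sched-entry S 04 400000 \
 *              clockid CLOCK_TAI
 */
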
static int taprio_parse_tc_entry(struct Qdisc *sch,
                                 struct nlattr *opt,
                                 u32 max_sdu[TC_QOPT_MAX_QUEUE],
                                 u32 fp[TC_QOPT_MAX_QUEUE],
                                 unsigned long *seen_tcs,
                                 struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
        struct net_device *dev = qdisc_dev(sch);
        int err, tc;
        u32 val;

        err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
                               taprio_tc_policy, extack);
        if (err < 0)
                return err;

        if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
                NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
                return -EINVAL;
        }

        tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
        if (tc >= TC_QOPT_MAX_QUEUE) {
                NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
                return -ERANGE;
        }

        if (*seen_tcs & BIT(tc)) {
                NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
                return -EINVAL;
        }

        *seen_tcs |= BIT(tc);

        if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]) {
                val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
                if (val > dev->max_mtu) {
                        NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
                        return -ERANGE;
                }

                max_sdu[tc] = val;
        }

        if (tb[TCA_TAPRIO_TC_ENTRY_FP])
                fp[tc] = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_FP]);

        return 0;
}

static int taprio_parse_tc_entries(struct Qdisc *sch,
                                   struct nlattr *opt,
                                   struct netlink_ext_ack *extack)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        u32 max_sdu[TC_QOPT_MAX_QUEUE];
        bool have_preemption = false;
        unsigned long seen_tcs = 0;
        u32 fp[TC_QOPT_MAX_QUEUE];
        struct nlattr *n;
        int tc, rem;
        int err = 0;

        for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
                max_sdu[tc] = q->max_sdu[tc];
                fp[tc] = q->fp[tc];
        }

        nla_for_each_nested(n, opt, rem) {
                if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
                        continue;

                err = taprio_parse_tc_entry(sch, n, max_sdu, fp, &seen_tcs,
                                            extack);
                if (err)
                        return err;
        }

        for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
                q->max_sdu[tc] = max_sdu[tc];
                q->fp[tc] = fp[tc];
                if (fp[tc] != TC_FP_EXPRESS)
                        have_preemption = true;
        }

        if (have_preemption) {
                if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                        NL_SET_ERR_MSG(extack,
                                       "Preemption only supported with full offload");
                        return -EOPNOTSUPP;
                }

                if (!ethtool_dev_mm_supported(dev)) {
                        NL_SET_ERR_MSG(extack,
                                       "Device does not support preemption");
                        return -EOPNOTSUPP;
                }
        }

        return err;
}
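
/* Illustrative additions to a taprio invocation (assumed iproute2 syntax):
 * "max-sdu 0 0 200" caps TC 2 at 200 byte SDUs (0 leaves a TC unrestricted),
 * and "fp P E E" marks TC 0 as preemptible, which as validated above requires
 * full offload and a preemption-capable device.
 */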

static int taprio_mqprio_cmp(const struct net_device *dev,
                             const struct tc_mqprio_qopt *mqprio)
{
        int i;

        if (!mqprio || mqprio->num_tc != dev->num_tc)
                return -1;

        for (i = 0; i < mqprio->num_tc; i++)
                if (dev->tc_to_txq[i].count != mqprio->count[i] ||
                    dev->tc_to_txq[i].offset != mqprio->offset[i])
                        return -1;

        for (i = 0; i <= TC_BITMASK; i++)
                if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
                        return -1;

        return 0;
}
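
/* A return of 0 above means @mqprio matches the traffic class to queue
 * mapping and priority map the device already uses; taprio_change() uses
 * that to treat the request as carrying no new mqprio settings.
 */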

static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
                         struct netlink_ext_ack *extack)
{
        struct qdisc_size_table *stab = rtnl_dereference(sch->stab);
        struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
        struct sched_gate_list *oper, *admin, *new_admin;
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct tc_mqprio_qopt *mqprio = NULL;
        unsigned long flags;
        u32 taprio_flags;
        ktime_t start;
        int i, err;

        err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
                                          taprio_policy, extack);
        if (err < 0)
                return err;

        if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
                mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

        /* The semantics of the 'flags' argument in relation to 'change()'
         * requests are interpreted following two rules (which are applied in
         * this order): (1) an omitted 'flags' argument is interpreted as
         * zero; (2) the 'flags' of a "running" taprio instance cannot be
         * changed.
         */
        taprio_flags = tb[TCA_TAPRIO_ATTR_FLAGS] ? nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]) : 0;

        /* txtime-assist and full offload are mutually exclusive */
        if ((taprio_flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
            (taprio_flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) {
                NL_SET_ERR_MSG_ATTR(extack, tb[TCA_TAPRIO_ATTR_FLAGS],
                                    "TXTIME_ASSIST and FULL_OFFLOAD are mutually exclusive");
                return -EINVAL;
        }

        if (q->flags != TAPRIO_FLAGS_INVALID && q->flags != taprio_flags) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Changing 'flags' of a running schedule is not supported");
                return -EOPNOTSUPP;
        }
        q->flags = taprio_flags;

        err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
        if (err < 0)
                return err;

        err = taprio_parse_tc_entries(sch, opt, extack);
        if (err)
                return err;

        new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
        if (!new_admin) {
                NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&new_admin->entries);

        oper = rtnl_dereference(q->oper_sched);
        admin = rtnl_dereference(q->admin_sched);

        /* no changes - no new mqprio settings */
        if (!taprio_mqprio_cmp(dev, mqprio))
                mqprio = NULL;

        if (mqprio && (oper || admin)) {
                NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
                err = -ENOTSUPP;
                goto free_sched;
        }

        if (mqprio) {
                err = netdev_set_num_tc(dev, mqprio->num_tc);
                if (err)
                        goto free_sched;
                for (i = 0; i < mqprio->num_tc; i++) {
                        netdev_set_tc_queue(dev, i,
                                            mqprio->count[i],
                                            mqprio->offset[i]);
                        q->cur_txq[i] = mqprio->offset[i];
                }

                /* Always use supplied priority mappings */
                for (i = 0; i <= TC_BITMASK; i++)
                        netdev_set_prio_tc_map(dev, i,
                                               mqprio->prio_tc_map[i]);
        }

        err = parse_taprio_schedule(q, tb, new_admin, extack);
        if (err < 0)
                goto free_sched;

        if (new_admin->num_entries == 0) {
                NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
                err = -EINVAL;
                goto free_sched;
        }

        err = taprio_parse_clockid(sch, tb, extack);
        if (err < 0)
                goto free_sched;

        taprio_set_picos_per_byte(dev, q);
        taprio_update_queue_max_sdu(q, new_admin, stab);

        if (FULL_OFFLOAD_IS_ENABLED(q->flags))
                err = taprio_enable_offload(dev, q, new_admin, extack);
        else
                err = taprio_disable_offload(dev, q, extack);
        if (err)
                goto free_sched;

        /* Protects against enqueue()/dequeue() */
        spin_lock_bh(qdisc_lock(sch));

        if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
                if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                        NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
                        err = -EINVAL;
                        goto unlock;
                }

                q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
        }

        if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
            !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
            !hrtimer_active(&q->advance_timer)) {
                hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
                q->advance_timer.function = advance_sched;
        }

        err = taprio_get_start_time(sch, new_admin, &start);
        if (err < 0) {
                NL_SET_ERR_MSG(extack, "Internal error: failed to get start time");
                goto unlock;
        }

        setup_txtime(q, new_admin, start);

        if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                if (!oper) {
                        rcu_assign_pointer(q->oper_sched, new_admin);
                        err = 0;
                        new_admin = NULL;
                        goto unlock;
                }

                rcu_assign_pointer(q->admin_sched, new_admin);
                if (admin)
                        call_rcu(&admin->rcu, taprio_free_sched_cb);
        } else {
                setup_first_end_time(q, new_admin, start);

                /* Protects against advance_sched() */
                spin_lock_irqsave(&q->current_entry_lock, flags);

                taprio_start_sched(sch, start, new_admin);

                rcu_assign_pointer(q->admin_sched, new_admin);
                if (admin)
                        call_rcu(&admin->rcu, taprio_free_sched_cb);

                spin_unlock_irqrestore(&q->current_entry_lock, flags);

                if (FULL_OFFLOAD_IS_ENABLED(q->flags))
                        taprio_offload_config_changed(q);
        }

        new_admin = NULL;
        err = 0;

        if (!stab)
                NL_SET_ERR_MSG_MOD(extack,
                                   "Size table not specified, frame length estimations may be inaccurate");

unlock:
        spin_unlock_bh(qdisc_lock(sch));

free_sched:
        if (new_admin)
                call_rcu(&new_admin->rcu, taprio_free_sched_cb);

        return err;
}

static void taprio_reset(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int i;

        hrtimer_cancel(&q->advance_timer);

        if (q->qdiscs) {
                for (i = 0; i < dev->num_tx_queues; i++)
                        if (q->qdiscs[i])
                                qdisc_reset(q->qdiscs[i]);
        }
}

static void taprio_destroy(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct sched_gate_list *oper, *admin;
        unsigned int i;

        list_del(&q->taprio_list);

        /* Note that taprio_reset() might not be called if an error
         * happens in qdisc_create(), after taprio_init() has been called.
         */
        hrtimer_cancel(&q->advance_timer);
        qdisc_synchronize(sch);

        taprio_disable_offload(dev, q, NULL);

        if (q->qdiscs) {
                for (i = 0; i < dev->num_tx_queues; i++)
                        qdisc_put(q->qdiscs[i]);

                kfree(q->qdiscs);
        }
        q->qdiscs = NULL;

        netdev_reset_tc(dev);

        oper = rtnl_dereference(q->oper_sched);
        admin = rtnl_dereference(q->admin_sched);

        if (oper)
                call_rcu(&oper->rcu, taprio_free_sched_cb);

        if (admin)
                call_rcu(&admin->rcu, taprio_free_sched_cb);

        taprio_cleanup_broken_mqprio(q);
}

static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        int i, tc;

        spin_lock_init(&q->current_entry_lock);

        hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
        q->advance_timer.function = advance_sched;

        q->root = sch;

        /* We only support static clockids. Use an invalid value as default
         * and get the valid one on taprio_change().
         */
        q->clockid = -1;
        q->flags = TAPRIO_FLAGS_INVALID;

        list_add(&q->taprio_list, &taprio_list);

        if (sch->parent != TC_H_ROOT) {
                NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc");
                return -EOPNOTSUPP;
        }

        if (!netif_is_multiqueue(dev)) {
                NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required");
                return -EOPNOTSUPP;
        }

        q->qdiscs = kcalloc(dev->num_tx_queues, sizeof(q->qdiscs[0]),
                            GFP_KERNEL);
        if (!q->qdiscs)
                return -ENOMEM;

        if (!opt)
                return -EINVAL;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                struct Qdisc *qdisc;

                dev_queue = netdev_get_tx_queue(dev, i);
                qdisc = qdisc_create_dflt(dev_queue,
                                          &pfifo_qdisc_ops,
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(i + 1)),
                                          extack);
                if (!qdisc)
                        return -ENOMEM;

                if (i < dev->real_num_tx_queues)
                        qdisc_hash_add(qdisc, false);

                q->qdiscs[i] = qdisc;
        }

        for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
                q->fp[tc] = TC_FP_EXPRESS;

        taprio_detect_broken_mqprio(q);

        return taprio_change(sch, opt, extack);
}

static void taprio_attach(struct Qdisc *sch)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        unsigned int ntx;

        /* Attach underlying qdisc */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
                struct Qdisc *old, *dev_queue_qdisc;

                if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                        struct Qdisc *qdisc = q->qdiscs[ntx];

                        /* In offload mode, the root taprio qdisc is bypassed
                         * and the netdev TX queues see the children directly
                         */
                        qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
                        dev_queue_qdisc = qdisc;
                } else {
                        /* In software mode, attach the root taprio qdisc
                         * to all netdev TX queues, so that dev_qdisc_enqueue()
                         * goes through taprio_enqueue().
                         */
                        dev_queue_qdisc = sch;
                }
                old = dev_graft_qdisc(dev_queue, dev_queue_qdisc);
                /* The qdisc's refcount needs to be elevated once
                 * for each netdev TX queue it is grafted onto
                 */
                qdisc_refcount_inc(dev_queue_qdisc);
                if (old)
                        qdisc_put(old);
        }
}
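
/* Resulting attachment (illustrative), for a device with two TX queues:
 *
 *      software mode:                  full offload:
 *      txq0 -> taprio (root)           txq0 -> q->qdiscs[0]
 *      txq1 -> taprio (root)           txq1 -> q->qdiscs[1]
 *
 * i.e. in software mode every queue points at the root taprio qdisc, while
 * in full offload mode the children are grafted directly onto the queues.
 */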

static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
                                             unsigned long cl)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx = cl - 1;

        if (ntx >= dev->num_tx_queues)
                return NULL;

        return netdev_get_tx_queue(dev, ntx);
}

static int taprio_graft(struct Qdisc *sch, unsigned long cl,
                        struct Qdisc *new, struct Qdisc **old,
                        struct netlink_ext_ack *extack)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

        if (!dev_queue)
                return -EINVAL;

        if (dev->flags & IFF_UP)
                dev_deactivate(dev);

        /* In offload mode, the child Qdisc is directly attached to the netdev
         * TX queue, and thus, we need to keep its refcount elevated in order
         * to counteract qdisc_graft()'s call to qdisc_put() once per TX queue.
         * However, save the reference to the new qdisc in the private array in
         * both software and offload cases, to have an up-to-date reference to
         * our children.
         */
        *old = q->qdiscs[cl - 1];
        if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                WARN_ON_ONCE(dev_graft_qdisc(dev_queue, new) != *old);
                if (new)
                        qdisc_refcount_inc(new);
                if (*old)
                        qdisc_put(*old);
        }

        q->qdiscs[cl - 1] = new;
        if (new)
                new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

        if (dev->flags & IFF_UP)
                dev_activate(dev);

        return 0;
}

static int dump_entry(struct sk_buff *msg,
                      const struct sched_entry *entry)
{
        struct nlattr *item;

        item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
        if (!item)
                return -ENOSPC;

        if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
                goto nla_put_failure;

        if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
                goto nla_put_failure;

        if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
                        entry->gate_mask))
                goto nla_put_failure;

        if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
                        entry->interval))
                goto nla_put_failure;

        return nla_nest_end(msg, item);

nla_put_failure:
        nla_nest_cancel(msg, item);
        return -1;
}

static int dump_schedule(struct sk_buff *msg,
                         const struct sched_gate_list *root)
{
        struct nlattr *entry_list;
        struct sched_entry *entry;

        if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
                        root->base_time, TCA_TAPRIO_PAD))
                return -1;

        if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
                        root->cycle_time, TCA_TAPRIO_PAD))
                return -1;

        if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
                        root->cycle_time_extension, TCA_TAPRIO_PAD))
                return -1;

        entry_list = nla_nest_start_noflag(msg,
                                           TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
        if (!entry_list)
                goto error_nest;

        list_for_each_entry(entry, &root->entries, list) {
                if (dump_entry(msg, entry) < 0)
                        goto error_nest;
        }

        nla_nest_end(msg, entry_list);
        return 0;

error_nest:
        nla_nest_cancel(msg, entry_list);
        return -1;
}

static int taprio_dump_tc_entries(struct sk_buff *skb,
                                  struct taprio_sched *q,
                                  struct sched_gate_list *sched)
{
        struct nlattr *n;
        int tc;

        for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
                n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY);
                if (!n)
                        return -EMSGSIZE;

                if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc))
                        goto nla_put_failure;

                if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
                                sched->max_sdu[tc]))
                        goto nla_put_failure;

                if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, q->fp[tc]))
                        goto nla_put_failure;

                nla_nest_end(skb, n);
        }

        return 0;

nla_put_failure:
        nla_nest_cancel(skb, n);
        return -EMSGSIZE;
}

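/* taprio_dump_xstats() below pre-fills the stats structure with 0xff bytes,
 * so any counter the driver does not overwrite reads back as
 * TAPRIO_STAT_NOT_SET and is simply skipped here.
 */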
static int taprio_put_stat(struct sk_buff *skb, u64 val, u16 attrtype)
{
        if (val == TAPRIO_STAT_NOT_SET)
                return 0;
        if (nla_put_u64_64bit(skb, attrtype, val, TCA_TAPRIO_OFFLOAD_STATS_PAD))
                return -EMSGSIZE;
        return 0;
}

static int taprio_dump_xstats(struct Qdisc *sch, struct gnet_dump *d,
                              struct tc_taprio_qopt_offload *offload,
                              struct tc_taprio_qopt_stats *stats)
{
        struct net_device *dev = qdisc_dev(sch);
        const struct net_device_ops *ops;
        struct sk_buff *skb = d->skb;
        struct nlattr *xstats;
        int err;

        ops = qdisc_dev(sch)->netdev_ops;

        /* FIXME I could use qdisc_offload_dump_helper(), but that messes
         * with sch->flags depending on whether the device reports taprio
         * stats, and I'm not sure whether that's a good idea, considering
         * that stats are optional to the offload itself
         */
        if (!ops->ndo_setup_tc)
                return 0;

        memset(stats, 0xff, sizeof(*stats));

        err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
        if (err == -EOPNOTSUPP)
                return 0;
        if (err)
                return err;

        xstats = nla_nest_start(skb, TCA_STATS_APP);
        if (!xstats)
                goto err;

        if (taprio_put_stat(skb, stats->window_drops,
                            TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS) ||
            taprio_put_stat(skb, stats->tx_overruns,
                            TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS))
                goto err_cancel;

        nla_nest_end(skb, xstats);

        return 0;

err_cancel:
        nla_nest_cancel(skb, xstats);
err:
        return -EMSGSIZE;
}

static int taprio_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct tc_taprio_qopt_offload offload = {
                .cmd = TAPRIO_CMD_STATS,
        };

        return taprio_dump_xstats(sch, d, &offload, &offload.stats);
}

static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        struct sched_gate_list *oper, *admin;
        struct tc_mqprio_qopt opt = { 0 };
        struct nlattr *nest, *sched_nest;

        oper = rtnl_dereference(q->oper_sched);
        admin = rtnl_dereference(q->admin_sched);

        mqprio_qopt_reconstruct(dev, &opt);

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!nest)
                goto start_error;

        if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
                goto options_error;

        if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
            nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
                goto options_error;

        if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
                goto options_error;

        if (q->txtime_delay &&
            nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
                goto options_error;

        if (oper && taprio_dump_tc_entries(skb, q, oper))
                goto options_error;

        if (oper && dump_schedule(skb, oper))
                goto options_error;

        if (!admin)
                goto done;

        sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
        if (!sched_nest)
                goto options_error;

        if (dump_schedule(skb, admin))
                goto admin_error;

        nla_nest_end(skb, sched_nest);

done:
        return nla_nest_end(skb, nest);

admin_error:
        nla_nest_cancel(skb, sched_nest);

options_error:
        nla_nest_cancel(skb, nest);

start_error:
        return -ENOSPC;
}

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
        struct taprio_sched *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
        unsigned int ntx = cl - 1;

        if (ntx >= dev->num_tx_queues)
                return NULL;

        return q->qdiscs[ntx];
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
        unsigned int ntx = TC_H_MIN(classid);

        if (!taprio_queue_get(sch, ntx))
                return 0;
        return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
                             struct sk_buff *skb, struct tcmsg *tcm)
{
        struct Qdisc *child = taprio_leaf(sch, cl);

        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle |= TC_H_MIN(cl);
        tcm->tcm_info = child->handle;

        return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                   struct gnet_dump *d)
        __releases(d->lock)
        __acquires(d->lock)
{
        struct Qdisc *child = taprio_leaf(sch, cl);
        struct tc_taprio_qopt_offload offload = {
                .cmd = TAPRIO_CMD_QUEUE_STATS,
                .queue_stats = {
                        .queue = cl - 1,
                },
        };

        if (gnet_stats_copy_basic(d, NULL, &child->bstats, true) < 0 ||
            qdisc_qstats_copy(d, child) < 0)
                return -1;

        return taprio_dump_xstats(sch, d, &offload, &offload.queue_stats.stats);
}

static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx;

        if (arg->stop)
                return;

        arg->count = arg->skip;
        for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
                if (!tc_qdisc_stats_dump(sch, ntx + 1, arg))
                        break;
        }
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
                                                struct tcmsg *tcm)
{
        return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
        .graft          = taprio_graft,
        .leaf           = taprio_leaf,
        .find           = taprio_find,
        .walk           = taprio_walk,
        .dump           = taprio_dump_class,
        .dump_stats     = taprio_dump_class_stats,
        .select_queue   = taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
        .cl_ops         = &taprio_class_ops,
        .id             = "taprio",
        .priv_size      = sizeof(struct taprio_sched),
        .init           = taprio_init,
        .change         = taprio_change,
        .destroy        = taprio_destroy,
        .reset          = taprio_reset,
        .attach         = taprio_attach,
        .peek           = taprio_peek,
        .dequeue        = taprio_dequeue,
        .enqueue        = taprio_enqueue,
        .dump           = taprio_dump,
        .dump_stats     = taprio_dump_stats,
        .owner          = THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("taprio");

static struct notifier_block taprio_device_notifier = {
        .notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
        int err = register_netdevice_notifier(&taprio_device_notifier);

        if (err)
                return err;

        return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
        unregister_qdisc(&taprio_qdisc_ops);
        unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Time Aware Priority qdisc");