include/net/netdev_queues.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NET_QUEUES_H
#define _LINUX_NET_QUEUES_H

#include <linux/netdevice.h>

/**
 * DOC: Lockless queue stopping / waking helpers.
 *
 * The netif_txq_maybe_stop() and __netif_txq_completed_wake()
 * macros are designed to safely implement stopping
 * and waking netdev queues without full lock protection.
 *
 * We assume that there can be no concurrent stop attempts and no concurrent
 * wake attempts. The try-stop should happen from the xmit handler,
 * while wake-up should be triggered from NAPI poll context.
 * The two may run concurrently (single producer, single consumer).
 *
 * The try-stop side is expected to run from the xmit handler and therefore
 * it does not reschedule Tx (netif_tx_start_queue() instead of
 * netif_tx_wake_queue()). Uses of the ``stop`` macros outside of the xmit
 * handler may lead to the xmit queue being enabled but not run.
 * The waking side does not have similar context restrictions.
 *
 * The macros guarantee that rings will not remain stopped if there's
 * space available, but they do *not* prevent false wake-ups when
 * the ring is full! Drivers should check for ring full at the start
 * of the xmit handler.
 *
 * All descriptor ring indexes (and other relevant shared state) must
 * be updated before invoking the macros.
 */
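
/* Illustrative sketch (not part of this header): how the two sides are
 * typically split across a driver.  my_ring_free(), MY_STOP_THRS and
 * MY_START_THRS are hypothetical, driver-specific placeholders.
 *
 *   producer, in .ndo_start_xmit:
 *     post descriptors, update the producer index, then
 *     netif_txq_maybe_stop(txq, my_ring_free(ring),
 *                          MY_STOP_THRS, MY_START_THRS);
 *
 *   consumer, in NAPI poll / Tx completion:
 *     reclaim descriptors, update the consumer index, then
 *     netif_txq_completed_wake(txq, pkts, bytes,
 *                              my_ring_free(ring), MY_START_THRS);
 */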

#define netif_txq_try_stop(txq, get_desc, start_thrs)                   \
        ({                                                              \
                int _res;                                               \
                                                                        \
                netif_tx_stop_queue(txq);                               \
                /* Producer index and stop bit must be visible          \
                 * to consumer before we recheck.                       \
                 * Pairs with a barrier in __netif_txq_completed_wake(). \
                 */                                                     \
                smp_mb__after_atomic();                                 \
                                                                        \
                /* We need to check again in case another               \
                 * CPU has just made room available.                    \
                 */                                                     \
                _res = 0;                                               \
                if (unlikely(get_desc >= start_thrs)) {                 \
                        netif_tx_start_queue(txq);                      \
                        _res = -1;                                      \
                }                                                       \
                _res;                                                   \
        })

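/* Illustrative sketch (not part of this header): netif_txq_try_stop() is
 * normally reached through netif_txq_maybe_stop() below; an xmit handler
 * that finds the ring already too full on entry may also call it directly.
 * my_ring_free(), MY_MAX_DESC_PER_SKB and MY_START_THRS are hypothetical
 * placeholders.
 *
 *   if (unlikely(my_ring_free(ring) < MY_MAX_DESC_PER_SKB)) {
 *           // 0 means the queue is now stopped; -1 means it raced with
 *           // the completion path and was re-enabled, so keep going.
 *           if (!netif_txq_try_stop(txq, my_ring_free(ring), MY_START_THRS))
 *                   return NETDEV_TX_BUSY;
 *   }
 */
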
/**
 * netif_txq_maybe_stop() - locklessly stop a Tx queue, if needed
 * @txq:        struct netdev_queue to stop/start
 * @get_desc:   get current number of free descriptors (see requirements below!)
 * @stop_thrs:  minimal number of available descriptors for the queue to be
 *              left enabled
 * @start_thrs: minimal number of descriptors to re-enable the queue, can be
 *              equal to @stop_thrs or higher to avoid frequent waking
 *
 * All arguments may be evaluated multiple times; beware of side effects.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Expected to be used from ndo_start_xmit; see the comment at the top of the
 * file.
 *
 * Returns:
 *       0 if the queue was stopped
 *       1 if the queue was left enabled
 *      -1 if the queue was re-enabled (raced with waking)
 */
#define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs)      \
        ({                                                              \
                int _res;                                               \
                                                                        \
                _res = 1;                                               \
                if (unlikely(get_desc < stop_thrs))                     \
                        _res = netif_txq_try_stop(txq, get_desc, start_thrs); \
                _res;                                                   \
        })

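/* Illustrative sketch (not part of this header): typical use of
 * netif_txq_maybe_stop() from .ndo_start_xmit.  my_pick_ring(),
 * my_post_skb(), my_ring_free(), MY_STOP_THRS and MY_START_THRS are
 * hypothetical, driver-specific placeholders.
 *
 *   static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *                                    struct net_device *dev)
 *   {
 *           struct my_ring *ring = my_pick_ring(dev, skb);
 *           struct netdev_queue *txq;
 *           unsigned int len = skb->len;
 *
 *           txq = netdev_get_tx_queue(dev, ring->index);
 *
 *           my_post_skb(ring, skb);           // writes descriptors and
 *                                             // advances the producer index
 *           netdev_tx_sent_queue(txq, len);   // BQL accounting
 *
 *           // Stop if fewer than MY_STOP_THRS descriptors remain; the
 *           // completion path re-enables the queue at MY_START_THRS.
 *           netif_txq_maybe_stop(txq, my_ring_free(ring),
 *                                MY_STOP_THRS, MY_START_THRS);
 *           return NETDEV_TX_OK;
 *   }
 */
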
/* Variant of netdev_tx_completed_queue() which guarantees smp_mb() if
 * @bytes != 0, regardless of kernel config.
 */
static inline void
netdev_txq_completed_mb(struct netdev_queue *dev_queue,
                        unsigned int pkts, unsigned int bytes)
{
        if (IS_ENABLED(CONFIG_BQL))
                netdev_tx_completed_queue(dev_queue, pkts, bytes);
        else if (bytes)
                smp_mb();
}

/**
 * __netif_txq_completed_wake() - locklessly wake a Tx queue, if needed
 * @txq:        struct netdev_queue to stop/start
 * @pkts:       number of packets completed
 * @bytes:      number of bytes completed
 * @get_desc:   get current number of free descriptors (see requirements below!)
 * @start_thrs: minimal number of descriptors to re-enable the queue
 * @down_cond:  down condition, predicate indicating that the queue should
 *              not be woken up even if descriptors are available
 *
 * All arguments may be evaluated multiple times.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Reports completed pkts/bytes to BQL.
 *
 * Returns:
 *       0 if the queue was woken up
 *       1 if the queue was already enabled (or disabled but @down_cond is true)
 *      -1 if the queue was left unchanged (@start_thrs not reached)
 */
#define __netif_txq_completed_wake(txq, pkts, bytes,                    \
                                   get_desc, start_thrs, down_cond)     \
        ({                                                              \
                int _res;                                               \
                                                                        \
                /* Report to BQL and piggyback on its barrier.          \
                 * Barrier makes sure that anybody stopping the queue   \
                 * after this point sees the new consumer index.        \
                 * Pairs with barrier in netif_txq_try_stop().          \
                 */                                                     \
                netdev_txq_completed_mb(txq, pkts, bytes);              \
                                                                        \
                _res = -1;                                              \
                if (pkts && likely(get_desc >= start_thrs)) {           \
                        _res = 1;                                       \
                        if (unlikely(netif_tx_queue_stopped(txq)) &&    \
                            !(down_cond)) {                             \
                                netif_tx_wake_queue(txq);               \
                                _res = 0;                               \
                        }                                               \
                }                                                       \
                _res;                                                   \
        })

#define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \
        __netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs, false)

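/* Illustrative sketch (not part of this header): typical use from the Tx
 * completion side of NAPI poll.  my_reclaim_done(), my_ring_free() and
 * MY_START_THRS are hypothetical placeholders; pkts/bytes are the totals
 * just reclaimed by the driver.
 *
 *   // my_reclaim_done() has already advanced the consumer index
 *   netif_txq_completed_wake(txq, pkts, bytes,
 *                            my_ring_free(ring), MY_START_THRS);
 *
 * The @down_cond argument of __netif_txq_completed_wake() lets a driver keep
 * the queue stopped even when descriptors are available, e.g. while the
 * device is going down.
 */
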
/* subqueue variants follow */

#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs)         \
        ({                                                              \
                struct netdev_queue *txq;                               \
                                                                        \
                txq = netdev_get_tx_queue(dev, idx);                    \
                netif_txq_try_stop(txq, get_desc, start_thrs);          \
        })

#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
        ({                                                              \
                struct netdev_queue *txq;                               \
                                                                        \
                txq = netdev_get_tx_queue(dev, idx);                    \
                netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs); \
        })

#define netif_subqueue_completed_wake(dev, idx, pkts, bytes,            \
                                      get_desc, start_thrs)             \
        ({                                                              \
                struct netdev_queue *txq;                               \
                                                                        \
                txq = netdev_get_tx_queue(dev, idx);                    \
                netif_txq_completed_wake(txq, pkts, bytes,              \
                                         get_desc, start_thrs);         \
        })

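/* Illustrative sketch (not part of this header): the subqueue variants let
 * multi-queue drivers pass the net_device and queue index instead of looking
 * up the struct netdev_queue themselves.  my_ring_free(), MY_STOP_THRS and
 * MY_START_THRS are hypothetical placeholders.
 *
 *   in .ndo_start_xmit:
 *     netif_subqueue_maybe_stop(dev, ring->index, my_ring_free(ring),
 *                               MY_STOP_THRS, MY_START_THRS);
 *
 *   in the completion path:
 *     netif_subqueue_completed_wake(dev, ring->index, pkts, bytes,
 *                                   my_ring_free(ring), MY_START_THRS);
 */
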
#endif