io_uring/cancel: don't default to setting req->work.cancel_seq
author Jens Axboe <axboe@kernel.dk>
Mon, 29 Jan 2024 03:11:55 +0000 (20:11 -0700)
committer Jens Axboe <axboe@kernel.dk>
Thu, 8 Feb 2024 20:27:06 +0000 (13:27 -0700)
Just leave it unset by default, avoiding dipping into the last
cacheline (which is otherwise untouched) for the fast path of using
poll to drive networked traffic. Add a flag that tells us if the
sequence is valid or not, and then we can defer actually assigning
the flag and sequence until someone runs cancelations.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
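
At its core the change turns an unconditional store into flag-guarded lazy
assignment. A minimal standalone sketch of the before/after, using stand-in
types and names rather than the real struct io_kiocb/io_ring_ctx:

#include <stdatomic.h>
#include <stdbool.h>

struct ctx { atomic_int cancel_seq; };			/* stand-in for io_ring_ctx */
struct req { unsigned int flags; int cancel_seq; };	/* stand-in for io_kiocb */

#define F_CANCEL_SEQ	(1u << 0)	/* illustrative flag bit */

/* Before: async prep always read ctx->cancel_seq and dirtied the
 * request's cold tail, whether or not cancelations ever ran. */
static void prep_eager(struct req *r, struct ctx *c)
{
	r->cancel_seq = atomic_load(&c->cancel_seq);
}

/* After: prep writes nothing; the first cancelation pass to inspect
 * the request stamps it, guarded by a bit in the hot flags word. */
static bool visit_lazy(struct req *r, int seq)
{
	if ((r->flags & F_CANCEL_SEQ) && r->cancel_seq == seq)
		return true;	/* already seen in this pass, skip */
	r->flags |= F_CANCEL_SEQ;
	r->cancel_seq = seq;
	return false;
}
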
include/linux/io_uring_types.h
io_uring/cancel.c
io_uring/cancel.h
io_uring/io_uring.c
io_uring/poll.c

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 56bf733d3ee65ba5a532430ff30b7296beca1820..e19698daae1a0d403bd4d2c012fb5a44d61a9020 100644
@@ -463,6 +463,7 @@ enum {
        REQ_F_SUPPORT_NOWAIT_BIT,
        REQ_F_ISREG_BIT,
        REQ_F_POLL_NO_LAZY_BIT,
+       REQ_F_CANCEL_SEQ_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -535,6 +536,8 @@ enum {
        REQ_F_HASH_LOCKED       = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
        /* don't use lazy poll wake for this request */
        REQ_F_POLL_NO_LAZY      = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
+       /* cancel sequence is set and valid */
+       REQ_F_CANCEL_SEQ        = IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
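
The __REQ_F_LAST_BIT sentinel above exists so the flag space can be checked
at compile time; a sketch of that style of check follows, where the exact
assertion site and the width of io_uring's flags word are assumptions, not
taken from this patch:

#include <assert.h>

enum {
	REQ_F_EXAMPLE_BIT,	/* illustrative bit indices */
	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

static_assert(__REQ_F_LAST_BIT <= 8 * sizeof(unsigned long long),
	      "request flag bits overflow the flags word");
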
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index 8a8b07dfc444cde6181e2e5f86b6aa799ab5980b..acfcdd7f059afd871e3dba0b591e26620f3db64b 100644
@@ -58,9 +58,8 @@ bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
                return false;
        if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
 check_seq:
-               if (cd->seq == req->work.cancel_seq)
+               if (io_cancel_match_sequence(req, cd->seq))
                        return false;
-               req->work.cancel_seq = cd->seq;
        }
 
        return true;
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
index c0a8e7c520b6d65479b2874d1ec536f21450342a..76b32e65c03cd72452e37293bfcdc5b604e10267 100644
@@ -25,4 +25,14 @@ void init_hash_table(struct io_hash_table *table, unsigned size);
 int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
 bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
 
+static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
+{
+       if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
+               return true;
+
+       req->flags |= REQ_F_CANCEL_SEQ;
+       req->work.cancel_seq = sequence;
+       return false;
+}
+
 #endif
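
The helper doubles as test-and-mark: the first visit in a given cancelation
pass stamps the request and reports no match (so the request gets canceled),
and any repeat visit with the same sequence reports a match (so it is
skipped). A small userspace harness showing that behavior, with a stand-in
struct io_kiocb holding only the fields the helper touches:

#include <stdbool.h>
#include <stdio.h>

#define REQ_F_CANCEL_SEQ	(1u << 0)	/* stand-in for the real flag */

struct io_kiocb {			/* stand-in, not the kernel struct */
	unsigned int flags;
	struct { int cancel_seq; } work;
};

static bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
{
	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
		return true;
	req->flags |= REQ_F_CANCEL_SEQ;
	req->work.cancel_seq = sequence;
	return false;
}

int main(void)
{
	struct io_kiocb req = { 0 };

	printf("%d\n", io_cancel_match_sequence(&req, 1));	/* 0: stamps seq 1 */
	printf("%d\n", io_cancel_match_sequence(&req, 1));	/* 1: already seen */
	printf("%d\n", io_cancel_match_sequence(&req, 2));	/* 0: new pass, restamp */
	return 0;
}
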
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b8ca907b77eb90f8f8c673895c8cd4aa165a6c42..fd552b260eefb68a2df2ddcea6b1add5e9282c87 100644
@@ -463,7 +463,6 @@ static void io_prep_async_work(struct io_kiocb *req)
 
        req->work.list.next = NULL;
        req->work.flags = 0;
-       req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
        if (req->flags & REQ_F_FORCE_ASYNC)
                req->work.flags |= IO_WQ_WORK_CONCURRENT;
 
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 7513afc7b702e4cbc727717fc59aa8eb758465df..c2b0a2d0762b2d59c8395e0c4d69f54217d4bf00 100644
@@ -588,10 +588,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
                                 struct io_poll_table *ipt, __poll_t mask,
                                 unsigned issue_flags)
 {
-       struct io_ring_ctx *ctx = req->ctx;
-
        INIT_HLIST_NODE(&req->hash_node);
-       req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
        io_init_poll_iocb(poll, mask);
        poll->file = req->file;
        req->apoll_events = poll->events;
@@ -818,9 +815,8 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
                if (poll_only && req->opcode != IORING_OP_POLL_ADD)
                        continue;
                if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
-                       if (cd->seq == req->work.cancel_seq)
+                       if (io_cancel_match_sequence(req, cd->seq))
                                continue;
-                       req->work.cancel_seq = cd->seq;
                }
                *out_bucket = hb;
                return req;