io_uring: add io_file_can_poll() helper
author	Jens Axboe <axboe@kernel.dk>
Mon, 29 Jan 2024 03:08:24 +0000 (20:08 -0700)
committer	Jens Axboe <axboe@kernel.dk>
Thu, 8 Feb 2024 20:27:06 +0000 (13:27 -0700)
This adds a flag to avoid dereferencing file and then f_op to figure out
whether the file has a poll handler defined. We generally perform this
check at least twice for networked workloads, and if ring provided
buffers are in use, on every buffer selection. The latter is
particularly troublesome, as buffer selection is otherwise a very fast
operation.
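
For reference, a sketch of the check the hot paths were calling directly
(file_can_poll() as defined in include/linux/poll.h; the exact form in a
given tree may differ slightly). It chases two pointers on every call,
which is the cost the cached flag avoids after the first lookup:

	static inline bool file_can_poll(struct file *file)
	{
		/* two dereferences per call: file -> f_op -> poll */
		return file->f_op->poll;
	}

With REQ_F_CAN_POLL cached in req->flags, subsequent checks reduce to a
single bit test on a field that is typically already hot in cache.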

Signed-off-by: Jens Axboe <axboe@kernel.dk>
include/linux/io_uring_types.h
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/kbuf.c
io_uring/poll.c
io_uring/rw.c

index e19698daae1a0d403bd4d2c012fb5a44d61a9020..4ddc7b3168f3d4481aa4b2476d1de4ddfaf30a13 100644 (file)
@@ -464,6 +464,7 @@ enum {
        REQ_F_ISREG_BIT,
        REQ_F_POLL_NO_LAZY_BIT,
        REQ_F_CANCEL_SEQ_BIT,
+       REQ_F_CAN_POLL_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -538,6 +539,8 @@ enum {
        REQ_F_POLL_NO_LAZY      = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
        /* cancel sequence is set and valid */
        REQ_F_CANCEL_SEQ        = IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
+       /* file is pollable */
+       REQ_F_CAN_POLL          = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
index fd552b260eefb68a2df2ddcea6b1add5e9282c87..17bd16be1dfd5076f4bcb1f78fce4c5c3fcef603 100644 (file)
@@ -1968,7 +1968,7 @@ fail:
        if (req->flags & REQ_F_FORCE_ASYNC) {
                bool opcode_poll = def->pollin || def->pollout;
 
-               if (opcode_poll && file_can_poll(req->file)) {
+               if (opcode_poll && io_file_can_poll(req)) {
                        needs_poll = true;
                        issue_flags |= IO_URING_F_NONBLOCK;
                }
index d5495710c17877624c75d8fa36b71af9535336a3..2952551fe345597b328c02287e46cb027725fd61 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/lockdep.h>
 #include <linux/resume_user_mode.h>
 #include <linux/kasan.h>
+#include <linux/poll.h>
 #include <linux/io_uring_types.h>
 #include <uapi/linux/eventpoll.h>
 #include "io-wq.h"
@@ -398,4 +399,15 @@ static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
                return 2 * sizeof(struct io_uring_sqe);
        return sizeof(struct io_uring_sqe);
 }
+
+static inline bool io_file_can_poll(struct io_kiocb *req)
+{
+       if (req->flags & REQ_F_CAN_POLL)
+               return true;
+       if (file_can_poll(req->file)) {
+               req->flags |= REQ_F_CAN_POLL;
+               return true;
+       }
+       return false;
+}
 #endif
index 18df5a9d2f5e7defb2df04f548d9e67b737ddbaf..71880615bb7881d5cf3527f4dd4ab42a03dc53b5 100644 (file)
@@ -180,7 +180,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
        req->buf_list = bl;
        req->buf_index = buf->bid;
 
-       if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
+       if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
                /*
                 * If we came in unlocked, we have no choice but to consume the
                 * buffer here, otherwise nothing ensures that the buffer won't
index c2b0a2d0762b2d59c8395e0c4d69f54217d4bf00..3f3380dc5f68190176f6a4e686acac416945408a 100644 (file)
@@ -724,7 +724,7 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 
        if (!def->pollin && !def->pollout)
                return IO_APOLL_ABORTED;
-       if (!file_can_poll(req->file))
+       if (!io_file_can_poll(req))
                return IO_APOLL_ABORTED;
        if (!(req->flags & REQ_F_APOLL_MULTISHOT))
                mask |= EPOLLONESHOT;
index d5e79d9bdc717b8cb917d6e06b2cbbe6840dd762..0fb7a045163ae7c58d0da639db4e26c30fea972d 100644 (file)
@@ -682,7 +682,7 @@ static bool io_rw_should_retry(struct io_kiocb *req)
         * just use poll if we can, and don't attempt if the fs doesn't
         * support callback based unlocks
         */
-       if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
+       if (io_file_can_poll(req) || !(req->file->f_mode & FMODE_BUF_RASYNC))
                return false;
 
        wait->wait.func = io_async_buf_func;
@@ -831,7 +831,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
                 * If we can poll, just do that. For a vectored read, we'll
                 * need to copy state first.
                 */
-               if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
+               if (io_file_can_poll(req) && !io_issue_defs[req->opcode].vectored)
                        return -EAGAIN;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
@@ -930,7 +930,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
        /*
         * Multishot MUST be used on a pollable file
         */
-       if (!file_can_poll(req->file))
+       if (!io_file_can_poll(req))
                return -EBADFD;
 
        ret = __io_read(req, issue_flags);