A preparation patch that moves the fast ->io_ev_fd check out of
io_eventfd_signal() and into the ev_posted*() callers. Compilers are
smart enough that the move doesn't change the generated code, but the
check will be needed at the call sites later.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 603cbe687dd2..5a87e0622ecb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1820,10 +1820,6 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
 {
 	struct io_ev_fd *ev_fd;
 
-	/* Return quickly if ctx->io_ev_fd doesn't exist */
-	if (likely(!rcu_dereference_raw(ctx->io_ev_fd)))
-		return;
-
 	rcu_read_lock();
 	/*
 	 * rcu_dereference ctx->io_ev_fd once and use it for both for checking
@@ -1843,7 +1839,6 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
 
 	if (!ev_fd->eventfd_async || io_wq_current_is_worker())
 		eventfd_signal(ev_fd->cq_ev_fd, 1);
-out:
 	rcu_read_unlock();
 }
 
@@ -1855,7 +1850,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
  * 1:1 relationship between how many times this function is called (and
  * hence the eventfd count) and number of CQEs posted to the CQ ring.
  */
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
 	/*
 	 * wake_up_all() may seem excessive, but io_wake_function() and
@@ -1864,7 +1859,8 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 	 */
 	if (wq_has_sleeper(&ctx->cq_wait))
 		wake_up_all(&ctx->cq_wait);
-	io_eventfd_signal(ctx);
+	if (unlikely(rcu_dereference_raw(ctx->io_ev_fd)))
+		io_eventfd_signal(ctx);
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1873,7 +1869,8 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 		if (wq_has_sleeper(&ctx->cq_wait))
 			wake_up_all(&ctx->cq_wait);
 	}
-	io_eventfd_signal(ctx);
+	if (unlikely(rcu_dereference_raw(ctx->io_ev_fd)))
+		io_eventfd_signal(ctx);
 }
 
 /* Returns true if there are no backlogged entries after the flush */
-- 
2.35.1
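
[Editor's note, not part of the patch: a minimal userspace sketch of the
pattern the change relies on, where the hot-path caller does a cheap,
unlocked NULL test on a shared pointer and only enters the heavier
signalling helper when something is actually registered. The kernel code
uses RCU (rcu_dereference_raw()/rcu_read_lock()); the sketch stands in
with C11 atomics, and every name in it (fake_ctx, maybe_signal,
do_signal) is invented for illustration.]

#include <stdatomic.h>
#include <stdio.h>

struct fake_ctx {
	_Atomic(int *) ev_fd;	/* stands in for ctx->io_ev_fd; NULL = none */
};

/* Slow path: re-read the pointer with acquire ordering before using it. */
static void do_signal(struct fake_ctx *ctx)
{
	int *fd = atomic_load_explicit(&ctx->ev_fd, memory_order_acquire);

	if (fd)
		printf("signalling eventfd %d\n", *fd);
}

/* Hot path: cheap check first, mirroring the hoisted ->io_ev_fd test. */
static inline void maybe_signal(struct fake_ctx *ctx)
{
	if (atomic_load_explicit(&ctx->ev_fd, memory_order_relaxed))
		do_signal(ctx);
}

int main(void)
{
	static int fd = 42;
	struct fake_ctx ctx = { .ev_fd = NULL };

	maybe_signal(&ctx);	/* nothing registered: helper is skipped */
	atomic_store(&ctx.ev_fd, &fd);
	maybe_signal(&ctx);	/* registered: slow path runs and signals */
	return 0;
}

[The sketch only illustrates why the registered/not-registered test sits
at the call sites: the common no-eventfd case is made explicit there,
which is what later patches in the series build on; the commit message
itself notes the move doesn't change the generated code today.]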