io_commit_cqring() is currently always called within a spinlocked
section, so it's best to keep it as slim as possible. Move
__io_commit_cqring_flush() out of it and into ev_posted*(). If the
fast checks fail and this post-processing is required, we reacquire
->completion_lock, which is fine as we don't care about the
performance of draining and offset timeouts.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5a87e0622ecb..c75a5767f58d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1771,20 +1771,21 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 	spin_unlock_irq(&ctx->timeout_lock);
 }
 
+static inline void io_commit_cqring(struct io_ring_ctx *ctx)
+{
+	/* order cqe stores with ring update */
+	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
+}
+
 static __cold void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
 {
+	spin_lock(&ctx->completion_lock);
 	if (ctx->off_timeout_used)
 		io_flush_timeouts(ctx);
 	if (ctx->drain_active)
 		io_queue_deferred(ctx);
-}
-
-static inline void io_commit_cqring(struct io_ring_ctx *ctx)
-{
-	if (unlikely(ctx->off_timeout_used || ctx->drain_active))
-		__io_commit_cqring_flush(ctx);
-	/* order cqe stores with ring update */
-	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
+	io_commit_cqring(ctx);
+	spin_unlock(&ctx->completion_lock);
 }
 
 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
@@ -1852,6 +1853,9 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
  */
 static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
+	if (unlikely(ctx->off_timeout_used || ctx->drain_active))
+		__io_commit_cqring_flush(ctx);
+
 	/*
 	 * wake_up_all() may seem excessive, but io_wake_function() and
 	 * io_should_wake() handle the termination of the loop and only
@@ -1865,6 +1869,9 @@ static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 {
+	if (unlikely(ctx->off_timeout_used || ctx->drain_active))
+		__io_commit_cqring_flush(ctx);
+
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
 		if (wq_has_sleeper(&ctx->cq_wait))
 			wake_up_all(&ctx->cq_wait);
-- 
2.35.1
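
As an aside, the pattern applied here is the usual unlocked fast-path
flag check with slow-path lock reacquisition. A minimal, self-contained
userspace sketch of the same idea, with a pthread mutex standing in for
the kernel spinlock and all names invented for illustration:

/*
 * Keep the hot commit as slim as possible and let the rare slow path
 * retake the lock by itself.  Not the kernel code, just the shape of it.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct ring {
	pthread_mutex_t completion_lock;
	_Atomic unsigned int tail;
	unsigned int cached_tail;
	bool slow_work_pending;		/* cf. off_timeout_used/drain_active */
};

/* hot path, called with completion_lock held: just publish the tail */
static inline void ring_commit(struct ring *r)
{
	/* order prior entry stores with the tail update */
	atomic_store_explicit(&r->tail, r->cached_tail,
			      memory_order_release);
}

/* cold path: reacquire the lock; performance is irrelevant here */
static void ring_commit_flush(struct ring *r)
{
	pthread_mutex_lock(&r->completion_lock);
	/* ... flush timeouts, queue deferred work ... */
	ring_commit(r);
	pthread_mutex_unlock(&r->completion_lock);
}

/* post-completion hook: cheap unlocked check, slow flush only if needed */
static void ring_ev_posted(struct ring *r)
{
	if (__builtin_expect(r->slow_work_pending, 0))
		ring_commit_flush(r);
	/* ... wake up waiters ... */
}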