Now that io_commit_cqring() is simple and tolerates being called
without a new CQE filled, kill a bunch of guards that are no longer
needed.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index bb8a1362cb5e..71f3bea34e66 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1947,8 +1947,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 			   ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
 	}
 
-	if (posted)
-		io_commit_cqring(ctx);
+	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
 	if (posted)
 		io_cqring_ev_posted(ctx);
@@ -2382,8 +2381,7 @@ static void __io_req_find_next_prep(struct io_kiocb *req)
 
 	spin_lock(&ctx->completion_lock);
 	posted = io_disarm_next(req);
-	if (posted)
-		io_commit_cqring(ctx);
+	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
 	if (posted)
 		io_cqring_ev_posted(ctx);
@@ -10248,8 +10246,7 @@ static __cold bool io_kill_timeouts(struct io_ring_ctx *ctx,
 		}
 	}
 	spin_unlock_irq(&ctx->timeout_lock);
-	if (canceled != 0)
-		io_commit_cqring(ctx);
+	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
 	if (canceled != 0)
 		io_cqring_ev_posted(ctx);
-- 
2.35.1
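
For context: at this point io_commit_cqring() is presumed to boil down
to a single CQ tail publish, roughly the sketch below (field names
follow fs/io_uring.c of this era; treat it as an illustration rather
than the exact tree state):

	static inline void io_commit_cqring(struct io_ring_ctx *ctx)
	{
		/* order CQE stores with the ring tail update */
		smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
	}

When no new CQE has been filled, cached_cq_tail is unchanged, so the
unconditional call merely republishes the same tail value, a harmless
no-op. That is what makes the "if (posted)" and "if (canceled != 0)"
guards around it removable, while the io_cqring_ev_posted() calls stay
conditional to avoid spurious wakeups.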