Add io_req_cache_empty(), which checks if there are requests in the
inline req cache or not. It'll be needed in the future, but also nicely
cleans up a few spots poking into ->free_list directly.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 73422af2dd79..3ccc13acb498 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2211,6 +2211,11 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
 	spin_unlock(&ctx->completion_lock);
 }
 
+static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
+{
+	return !ctx->submit_state.free_list.next;
+}
+
 /*
  * A request might get retired back into the request caches even before opcode
  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
@@ -2232,7 +2237,7 @@ static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	 */
 	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH) {
 		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
-		if (state->free_list.next)
+		if (!io_req_cache_empty(ctx))
 			return true;
 	}
 
@@ -2261,7 +2266,7 @@ static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 
 static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
 {
-	if (unlikely(!ctx->submit_state.free_list.next))
+	if (unlikely(io_req_cache_empty(ctx)))
 		return __io_alloc_req_refill(ctx);
 	return true;
 }
@@ -9790,7 +9795,7 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	mutex_lock(&ctx->uring_lock);
 	io_flush_cached_locked_reqs(ctx, state);
 
-	while (state->free_list.next) {
+	while (!io_req_cache_empty(ctx)) {
 		struct io_wq_work_node *node;
 		struct io_kiocb *req;
 
-- 
2.35.1
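
For context, a minimal user-space sketch of the pattern the new helper wraps,
assuming a simplified stand-in for the kernel's intrusive free list: the
request cache is a singly linked stack whose head node is embedded in the
cache itself, so "the cache is empty" reduces to "head.next == NULL", which is
exactly the condition io_req_cache_empty() encapsulates. The names below
(work_node, req_cache, req_cache_empty, ...) are illustrative only, not the
kernel's types.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for an intrusive stack node (cf. io_wq_work_node). */
struct work_node {
	struct work_node *next;
};

/* Simplified request cache: the stack head lives in the cache itself,
 * and head.next points at the first cached entry (or NULL if empty). */
struct req_cache {
	struct work_node free_list;
};

/* Emptiness check in one place, mirroring the intent of the patch. */
static inline bool req_cache_empty(struct req_cache *cache)
{
	return !cache->free_list.next;
}

static void req_cache_push(struct req_cache *cache, struct work_node *node)
{
	node->next = cache->free_list.next;
	cache->free_list.next = node;
}

static struct work_node *req_cache_pop(struct req_cache *cache)
{
	struct work_node *node = cache->free_list.next;

	if (node)
		cache->free_list.next = node->next;
	return node;
}

int main(void)
{
	struct req_cache cache = { .free_list = { .next = NULL } };
	struct work_node a, b;

	printf("empty: %d\n", req_cache_empty(&cache));	/* 1 */
	req_cache_push(&cache, &a);
	req_cache_push(&cache, &b);
	printf("empty: %d\n", req_cache_empty(&cache));	/* 0 */
	while (!req_cache_empty(&cache))
		req_cache_pop(&cache);
	printf("empty: %d\n", req_cache_empty(&cache));	/* 1 */
	return 0;
}

Keeping the check behind a helper means callers no longer poke at
->free_list.next directly, which is the cleanup the commit message describes.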