Add a new helper, io_flush_cached_locked_reqs(), that splices
locked_free_list to free_list, doing all the synchronisation and
invariant re-initialisation that the move requires in one place.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index d081ef54fb02..6a5d712245f7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1648,6 +1648,15 @@ static void io_req_complete_failed(struct io_kiocb *req, long res)
 	io_req_complete_post(req, res, 0);
 }
 
+static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
+					struct io_comp_state *cs)
+{
+	spin_lock_irq(&ctx->completion_lock);
+	list_splice_init(&cs->locked_free_list, &cs->free_list);
+	cs->locked_free_nr = 0;
+	spin_unlock_irq(&ctx->completion_lock);
+}
+
 /* Returns true IFF there are requests in the cache */
 static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 {
@@ -1660,12 +1669,8 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 	 * locked cache, grab the lock and move them over to our submission
 	 * side cache.
 	 */
-	if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH) {
-		spin_lock_irq(&ctx->completion_lock);
-		list_splice_init(&cs->locked_free_list, &cs->free_list);
-		cs->locked_free_nr = 0;
-		spin_unlock_irq(&ctx->completion_lock);
-	}
+	if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH)
+		io_flush_cached_locked_reqs(ctx, cs);
 
 	nr = state->free_reqs;
 	while (!list_empty(&cs->free_list)) {
@@ -8425,13 +8430,8 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 		submit_state->free_reqs = 0;
 	}
 
-	spin_lock_irq(&ctx->completion_lock);
-	list_splice_init(&cs->locked_free_list, &cs->free_list);
-	cs->locked_free_nr = 0;
-	spin_unlock_irq(&ctx->completion_lock);
-
+	io_flush_cached_locked_reqs(ctx, cs);
 	io_req_cache_free(&cs->free_list, NULL);
-
 	mutex_unlock(&ctx->uring_lock);
 }
-- 
2.24.0
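
[Editorial aside, not part of the patch: below is a minimal userspace C
sketch of the pattern the new helper centralises, a cheap racy counter
read that gates a locked list splice. Every name in it is an illustrative
stand-in rather than kernel code: the pthread mutex plays the role of
ctx->completion_lock, the relaxed atomic load plays the role of
READ_ONCE(), COMPL_BATCH stands in for IO_COMPL_BATCH, and the hand-rolled
singly linked list replaces the kernel's list_head, whose
list_splice_init() splices in O(1) rather than walking to the tail.]

/*
 * Userspace sketch of the locked-cache flush pattern; illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

struct comp_state {
	pthread_mutex_t completion_lock;
	struct node *locked_free_list;	/* filled by the completion side */
	atomic_int locked_free_nr;	/* length of locked_free_list */
	struct node *free_list;		/* submission-side cache, unlocked */
};

/* The helper: splice locked_free_list into free_list and zero the
 * counter, all under the lock so the invariant "counter matches list
 * length" is never visibly broken. */
static void flush_cached_locked_reqs(struct comp_state *cs)
{
	pthread_mutex_lock(&cs->completion_lock);
	struct node *n = cs->locked_free_list;
	if (n) {
		struct node *tail = n;
		while (tail->next)
			tail = tail->next;
		tail->next = cs->free_list;	/* splice */
		cs->free_list = n;
		cs->locked_free_list = NULL;
	}
	atomic_store_explicit(&cs->locked_free_nr, 0, memory_order_relaxed);
	pthread_mutex_unlock(&cs->completion_lock);
}

#define COMPL_BATCH 4	/* stand-in for IO_COMPL_BATCH */

/* The caller: a cheap racy read decides whether taking the lock is
 * worthwhile; a stale value only defers the flush, never corrupts it. */
static void maybe_flush(struct comp_state *cs)
{
	if (atomic_load_explicit(&cs->locked_free_nr,
				 memory_order_relaxed) > COMPL_BATCH)
		flush_cached_locked_reqs(cs);
}

int main(void)
{
	struct comp_state cs = {
		.completion_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* Simulate the completion side caching a few freed requests. */
	for (int i = 0; i < 8; i++) {
		struct node *n = malloc(sizeof(*n));
		pthread_mutex_lock(&cs.completion_lock);
		n->next = cs.locked_free_list;
		cs.locked_free_list = n;
		atomic_fetch_add_explicit(&cs.locked_free_nr, 1,
					  memory_order_relaxed);
		pthread_mutex_unlock(&cs.completion_lock);
	}

	maybe_flush(&cs);	/* 8 > 4, so the splice happens */

	int moved = 0;
	for (struct node *n = cs.free_list; n; n = n->next)
		moved++;
	printf("moved %d cached reqs to the free list\n", moved);
	return 0;
}

[One difference worth noting: the kernel takes spin_lock_irq() rather than
a mutex because requests can be added to locked_free_list from completion
contexts where sleeping is not allowed; the protected invariant, that
locked_free_nr equals the length of locked_free_list, is the same in both
versions.]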