io_submit_flush_completions() already does batching, so batch request freeing there as well, reusing the batch_free infrastructure from iopoll. Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx> --- fs/io_uring.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 3277a06e2fb6..6f767781351f 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1795,6 +1795,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req) static void io_submit_flush_completions(struct io_comp_state *cs) { + struct req_batch rb; struct io_kiocb *req; struct io_ring_ctx *ctx = cs->ctx; int i, nr = cs->nr; @@ -1808,8 +1809,13 @@ static void io_submit_flush_completions(struct io_comp_state *cs) spin_unlock_irq(&ctx->completion_lock); io_cqring_ev_posted(ctx); - for (i = 0; i < nr; ++i) - io_put_req(cs->reqs[i]); + rb.to_free = 0; + for (i = 0; i < nr; ++i) { + req = cs->reqs[i]; + if (refcount_dec_and_test(&req->refs)) + io_req_free_batch(&rb, req); + } + io_req_free_batch_finish(ctx, &rb); cs->nr = 0; } -- 2.24.0