Currently, batch free handles request memory freeing and ctx ref putting
together. Separate them and use different counters; that will be needed
for reusing request memory.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index be940db96fb8..c1f7dd17a62f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2233,6 +2233,7 @@ static void io_free_req(struct io_kiocb *req)
 struct req_batch {
 	void *reqs[IO_IOPOLL_BATCH];
 	int to_free;
+	int ctx_refs;
 
 	struct task_struct *task;
 	int task_refs;
@@ -2242,6 +2243,7 @@ static inline void io_init_req_batch(struct req_batch *rb)
 {
 	rb->to_free = 0;
 	rb->task_refs = 0;
+	rb->ctx_refs = 0;
 	rb->task = NULL;
 }
 
@@ -2249,7 +2251,6 @@ static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
 				      struct req_batch *rb)
 {
 	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
-	percpu_ref_put_many(&ctx->refs, rb->to_free);
 	rb->to_free = 0;
 }
 
@@ -2262,6 +2263,8 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 		io_put_task(rb->task, rb->task_refs);
 		rb->task = NULL;
 	}
+	if (rb->ctx_refs)
+		percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
 }
 
 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
@@ -2275,6 +2278,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 		rb->task_refs = 0;
 	}
 	rb->task_refs++;
+	rb->ctx_refs++;
 
 	io_dismantle_req(req);
 	rb->reqs[rb->to_free++] = req;
-- 
2.24.0
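
[Editorial illustration, not part of the patch] The pattern the change introduces
can be seen in a minimal userspace sketch: one counter (to_free) tracks cached
request memory that may be flushed in bulk at any point, while a separate counter
(ctx_refs) accumulates ring-context references that are dropped only once when the
batch is finished. All identifiers below (ring_ctx, req_batch, flush_reqs,
finish_batch, batch_req) are invented for illustration and are not kernel APIs.

/*
 * Two-counter batching sketch: memory freeing and ctx-ref dropping
 * are tracked and released independently.
 */
#include <stdio.h>
#include <stdlib.h>

#define BATCH 8

struct ring_ctx {
	int refs;		/* stand-in for the ctx percpu refcount */
};

struct req_batch {
	void *reqs[BATCH];
	int to_free;		/* requests whose memory awaits freeing */
	int ctx_refs;		/* ctx references to drop when finishing */
};

/* Free cached request memory; does not touch ctx references. */
static void flush_reqs(struct req_batch *rb)
{
	for (int i = 0; i < rb->to_free; i++)
		free(rb->reqs[i]);
	rb->to_free = 0;
}

/* Finish a batch: flush leftover memory, then drop ctx refs once. */
static void finish_batch(struct ring_ctx *ctx, struct req_batch *rb)
{
	if (rb->to_free)
		flush_reqs(rb);
	if (rb->ctx_refs) {
		ctx->refs -= rb->ctx_refs;
		rb->ctx_refs = 0;
	}
}

/* Add one completed request to the batch. */
static void batch_req(struct req_batch *rb, void *req)
{
	rb->ctx_refs++;			/* one ctx ref per request */
	rb->reqs[rb->to_free++] = req;
	if (rb->to_free == BATCH)
		flush_reqs(rb);		/* memory flush, refs untouched */
}

int main(void)
{
	struct ring_ctx ctx = { .refs = 10 };
	struct req_batch rb = { .to_free = 0, .ctx_refs = 0 };

	for (int i = 0; i < 10; i++)
		batch_req(&rb, malloc(64));
	finish_batch(&ctx, &rb);
	printf("remaining ctx refs: %d\n", ctx.refs);	/* prints 0 */
	return 0;
}

Because the flush path no longer touches the refs, the memory side can later be
repurposed (e.g. for request reuse, as the commit message anticipates) without
changing when ctx references are released.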