On 2021/9/25 4:59 AM, Pavel Begunkov wrote:
Add a helper io_free_batch_list(), which takes a single linked list and
puts/frees all requests from it in an efficient manner. Will be reused
later.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
fs/io_uring.c | 34 +++++++++++++++++++++-------------
1 file changed, 21 insertions(+), 13 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 205127394649..ad8af05af4bc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2308,12 +2308,31 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
wq_stack_add_head(&req->comp_list, &state->free_list);
}

+static void io_free_batch_list(struct io_ring_ctx *ctx,
+ struct io_wq_work_list *list)
+ __must_hold(&ctx->uring_lock)
+{
+ struct io_wq_work_node *node;
+ struct req_batch rb;
+
+ io_init_req_batch(&rb);
+ node = list->first;
+ do {
+ struct io_kiocb *req = container_of(node, struct io_kiocb,
+ comp_list);
+
+ node = req->comp_list.next;
+ if (req_ref_put_and_test(req))
+ io_req_free_batch(&rb, req, &ctx->submit_state);
+ } while (node);
+ io_req_free_batch_finish(ctx, &rb);
+}

Hi Pavel,

Why don't we use wq_list_for_each() here as well?
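Something like this (untested sketch) is what I mean, driving the loop with the same macro that __io_submit_flush_completions() already iterates with:

	static void io_free_batch_list(struct io_ring_ctx *ctx,
				       struct io_wq_work_list *list)
		__must_hold(&ctx->uring_lock)
	{
		struct io_wq_work_node *node, *prev;
		struct req_batch rb;

		io_init_req_batch(&rb);
		wq_list_for_each(node, prev, list) {
			struct io_kiocb *req = container_of(node, struct io_kiocb,
							    comp_list);

			/* drop our reference; batch-free the req once it hits zero */
			if (req_ref_put_and_test(req))
				io_req_free_batch(&rb, req, &ctx->submit_state);
		}
		io_req_free_batch_finish(ctx, &rb);
	}

Though maybe I'm missing something: io_req_free_batch() pushes the req onto state->free_list through comp_list via wq_stack_add_head(), so comp_list.next may already be overwritten by the time the macro reads pos->next to advance. If that is why the open-coded loop saves node before dropping the ref, please ignore this.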
+
static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
__must_hold(&ctx->uring_lock)
{
struct io_wq_work_node *node, *prev;
struct io_submit_state *state = &ctx->submit_state;
- struct req_batch rb;

spin_lock(&ctx->completion_lock);
wq_list_for_each(node, prev, &state->compl_reqs) {
@@ -2327,18 +2346,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
spin_unlock(&ctx->completion_lock);
io_cqring_ev_posted(ctx);

- io_init_req_batch(&rb);
- node = state->compl_reqs.first;
- do {
- struct io_kiocb *req = container_of(node, struct io_kiocb,
- comp_list);
-
- node = req->comp_list.next;
- if (req_ref_put_and_test(req))
- io_req_free_batch(&rb, req, &ctx->submit_state);
- } while (node);
-
- io_req_free_batch_finish(ctx, &rb);
+ io_free_batch_list(ctx, &state->compl_reqs);

INIT_WQ_LIST(&state->compl_reqs);
}