Trace task_work_run to help provide stats on how often task work is run
and what batch sizes are coming through.

Signed-off-by: Dylan Yudaken <dylany@xxxxxx>
---
 io_uring/io_uring.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 0b4d3eb06d16..8cf868315008 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -987,10 +987,12 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
 	percpu_ref_put(&ctx->refs);
 }
 
-static void handle_tw_list(struct llist_node *node,
-			   struct io_ring_ctx **ctx, bool *locked,
-			   struct llist_node *fake)
+static unsigned int handle_tw_list(struct llist_node *node,
+				   struct io_ring_ctx **ctx, bool *locked,
+				   struct llist_node *fake)
 {
+	unsigned int count = 0;
+
 	while (node && node != fake) {
 		struct llist_node *next = node->next;
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
@@ -1051,12 +1053,14 @@ void tctx_task_work(struct callback_head *cb)
 						  task_work);
 	struct llist_node fake = {};
 	struct llist_node *node = io_llist_xchg(&tctx->task_list, &fake);
+	unsigned int loops = 1;
+	unsigned int count = handle_tw_list(node, &ctx, &uring_locked, &fake);
 
-	handle_tw_list(node, &ctx, &uring_locked, &fake);
 	node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
 
 	while (node != &fake) {
+		loops++;
 		node = io_llist_xchg(&tctx->task_list, &fake);
-		handle_tw_list(node, &ctx, &uring_locked, &fake);
+		count += handle_tw_list(node, &ctx, &uring_locked, &fake);
 		node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
 	}
 
@@ -1065,6 +1069,8 @@ void tctx_task_work(struct callback_head *cb)
 	/* relaxed read is enough as only the task itself sets ->in_idle */
 	if (unlikely(atomic_read(&tctx->in_idle)))
 		io_uring_drop_tctx_refs(current);
+
+	trace_io_uring_task_work_run(tctx, count, loops);
 }
 
 void io_req_task_work_add(struct io_kiocb *req)
-- 
2.30.2
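
The io_uring_task_work_run tracepoint definition itself is not part of
this diff. A minimal sketch of what the corresponding TRACE_EVENT in
include/trace/events/io_uring.h could look like, assuming it records the
tctx pointer plus the count and loops totals passed above, and that the
usual TRACE_SYSTEM boilerplate for io_uring events already exists in that
header (the exact field layout here is an assumption, not something this
patch specifies):

	/*
	 * Sketch only, not part of this patch: a TRACE_EVENT definition
	 * matching the trace_io_uring_task_work_run(tctx, count, loops)
	 * call added above.
	 */
	TRACE_EVENT(io_uring_task_work_run,

		TP_PROTO(void *tctx, unsigned int count, unsigned int loops),

		TP_ARGS(tctx, count, loops),

		TP_STRUCT__entry(
			__field(void *,		tctx)
			__field(unsigned int,	count)
			__field(unsigned int,	loops)
		),

		TP_fast_assign(
			__entry->tctx	= tctx;
			__entry->count	= count;
			__entry->loops	= loops;
		),

		/* count = work items completed, loops = list refill passes */
		TP_printk("tctx %p count %u loops %u",
			  __entry->tctx, __entry->count, __entry->loops)
	);

Assuming such an event is wired up, the batch-size stats can then be
gathered from tracefs, e.g. by enabling
events/io_uring/io_uring_task_work_run under /sys/kernel/tracing and
reading the trace pipe.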