Add tw_ctx to indicate whether all requests in prior_task_list belong to a single ctx; this will be used in the next patch. Signed-off-by: Hao Xu <haoxu@xxxxxxxxxxxxxxxxx> --- fs/io_uring.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/io_uring.c b/fs/io_uring.c index 48387ea47c15..596e9e885362 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -477,6 +477,7 @@ struct io_uring_task { struct io_wq_work_list task_list; struct callback_head task_work; struct io_wq_work_list prior_task_list; + struct io_ring_ctx *tw_ctx; unsigned int nr; unsigned int prior_nr; bool task_running; @@ -2222,6 +2223,10 @@ static void io_req_task_work_add(struct io_kiocb *req, bool emergency) if (emergency && tctx->prior_nr * MAX_EMERGENCY_TW_RATIO < tctx->nr) { wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list); tctx->prior_nr++; + if (tctx->prior_nr == 1) + tctx->tw_ctx = req->ctx; + else if (tctx->tw_ctx && req->ctx != tctx->tw_ctx) + tctx->tw_ctx = NULL; } else { wq_list_add_tail(&req->io_task_work.node, &tctx->task_list); } @@ -2250,6 +2255,7 @@ static void io_req_task_work_add(struct io_kiocb *req, bool emergency) spin_lock_irqsave(&tctx->task_lock, flags); tctx->nr = tctx->prior_nr = 0; + tctx->tw_ctx = NULL; tctx->task_running = false; wq_list_merge(&tctx->prior_task_list, &tctx->task_list); node = tctx->prior_task_list.first; -- 2.24.4