All our task_work handling is targeted at the state in the io_kiocb
itself, which is what it is being used for. However, MSG_RING rolls its
own task_work handling, ignoring how that is usually done.

In preparation for switching MSG_RING to be able to use the normal
task_work handling, add io_req_task_work_add_remote() which allows the
caller to pass in the target io_ring_ctx and task.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 io_uring/io_uring.c | 27 +++++++++++++++++++--------
 io_uring/io_uring.h |  2 ++
 2 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 9978dbe00027..609ff9ea5930 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1241,9 +1241,10 @@ void tctx_task_work(struct callback_head *cb)
 	WARN_ON_ONCE(ret);
 }
 
-static inline void io_req_local_work_add(struct io_kiocb *req, unsigned tw_flags)
+static inline void io_req_local_work_add(struct io_kiocb *req,
+					 struct io_ring_ctx *ctx,
+					 unsigned tw_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
 	unsigned nr_wait, nr_tw, nr_tw_prev;
 	unsigned long flags;
 
@@ -1291,9 +1292,10 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned tw_flags
 	wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
 }
 
-static void io_req_normal_work_add(struct io_kiocb *req)
+static void io_req_normal_work_add(struct io_kiocb *req,
+				   struct task_struct *task)
 {
-	struct io_uring_task *tctx = req->task->io_uring;
+	struct io_uring_task *tctx = task->io_uring;
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
 	bool was_empty;
@@ -1319,7 +1321,7 @@ static void io_req_normal_work_add(struct io_kiocb *req)
 		return;
 	}
 
-	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
+	if (likely(!task_work_add(task, &tctx->task_work, ctx->notify_method)))
 		return;
 
 	io_fallback_tw(tctx, false);
@@ -1328,9 +1330,18 @@ static void io_req_normal_work_add(struct io_kiocb *req)
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
 {
 	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
-		io_req_local_work_add(req, flags);
+		io_req_local_work_add(req, req->ctx, flags);
+	else
+		io_req_normal_work_add(req, req->task);
+}
+
+void io_req_task_work_add_remote(struct io_kiocb *req, struct task_struct *task,
+				 struct io_ring_ctx *ctx, unsigned flags)
+{
+	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+		io_req_local_work_add(req, ctx, flags);
 	else
-		io_req_normal_work_add(req);
+		io_req_normal_work_add(req, task);
 }
 
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
@@ -1349,7 +1360,7 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 				    io_task_work.node);
 
 		node = node->next;
-		io_req_normal_work_add(req);
+		io_req_normal_work_add(req, req->task);
 	}
 }
 
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index bde463642c71..a6dec5321ec4 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -74,6 +74,8 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 			       unsigned issue_flags);
 
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
+void io_req_task_work_add_remote(struct io_kiocb *req, struct task_struct *task,
+				 struct io_ring_ctx *ctx, unsigned flags);
 bool io_alloc_async_data(struct io_kiocb *req);
 void io_req_task_queue(struct io_kiocb *req);
 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);

-- 
2.43.0
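
As a usage illustration (a minimal sketch, not part of this patch): a
caller that wants completion work to run in the context of a *target*
ring, the way a future MSG_RING conversion might, could use the new
helper roughly as below. io_msg_queue_tw_remote(), io_msg_tw_complete()
and the IOU_F_TWQ_LAZY_WAKE flag choice are assumptions for the example;
only io_req_task_work_add_remote() and ctx->submitter_task come from
this patch.

/*
 * Sketch: queue req's task_work against a target ring instead of
 * req->ctx/req->task, which is what __io_req_task_work_add() uses.
 */
static void io_msg_queue_tw_remote(struct io_kiocb *req,
				   struct io_ring_ctx *target_ctx)
{
	/* callback to run from the target ring's task_work context */
	req->io_task_work.func = io_msg_tw_complete;

	/*
	 * Pass the target ctx and the task that should execute the work
	 * (here assumed to be the target ring's submitter task) instead
	 * of letting the helper default to the request's own ring/task.
	 */
	io_req_task_work_add_remote(req, target_ctx->submitter_task,
				    target_ctx, IOU_F_TWQ_LAZY_WAKE);
}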