Requests can be in flight on the io-wq, and can be long lived (for example
a double read will get onto the io-wq). So make sure to retarget the rsrc
nodes on those requests.

Signed-off-by: Dylan Yudaken <dylany@xxxxxxxx>
---
 io_uring/rsrc.c | 46 ++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)

diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 106210e0d5d5..8d0d40713a63 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -16,6 +16,7 @@
 #include "openclose.h"
 #include "rsrc.h"
 #include "opdef.h"
+#include "tctx.h"
 
 struct io_rsrc_update {
 	struct file *file;
@@ -24,6 +25,11 @@ struct io_rsrc_update {
 	u32 offset;
 };
 
+struct io_retarget_data {
+	struct io_ring_ctx *ctx;
+	unsigned int refs;
+};
+
 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 				  struct io_mapped_ubuf **pimu,
 				  struct page **last_hpage);
@@ -250,11 +256,42 @@ static void io_rsrc_retarget_schedule(struct io_ring_ctx *ctx)
 	ctx->rsrc_retarget_scheduled = true;
 }
 
+static void io_retarget_rsrc_wq_cb(struct io_wq_work *work, void *data)
+{
+	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+	struct io_retarget_data *rd = data;
+
+	if (req->ctx != rd->ctx)
+		return;
+
+	rd->refs += io_rsrc_retarget_req(rd->ctx, req);
+}
+
+static void io_rsrc_retarget_wq(struct io_retarget_data *data)
+	__must_hold(&data->ctx->uring_lock)
+{
+	struct io_ring_ctx *ctx = data->ctx;
+	struct io_tctx_node *node;
+
+	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+		struct io_uring_task *tctx = node->task->io_uring;
+
+		if (!tctx->io_wq)
+			continue;
+
+		io_wq_for_each(tctx->io_wq, io_retarget_rsrc_wq_cb, data);
+	}
+}
+
 static void __io_rsrc_retarget_work(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->uring_lock)
 {
 	struct io_rsrc_node *node;
-	unsigned int refs;
+	struct io_retarget_data data = {
+		.ctx = ctx,
+		.refs = 0
+	};
+	unsigned int poll_refs;
 	bool any_waiting;
 
 	if (!ctx->rsrc_node)
@@ -273,10 +310,11 @@ static void __io_rsrc_retarget_work(struct io_ring_ctx *ctx)
 	if (!any_waiting)
 		return;
 
-	refs = io_rsrc_retarget_table(ctx, &ctx->cancel_table);
-	refs += io_rsrc_retarget_table(ctx, &ctx->cancel_table_locked);
+	poll_refs = io_rsrc_retarget_table(ctx, &ctx->cancel_table);
+	poll_refs += io_rsrc_retarget_table(ctx, &ctx->cancel_table_locked);
+	io_rsrc_retarget_wq(&data);
 
-	ctx->rsrc_cached_refs -= refs;
+	ctx->rsrc_cached_refs -= (poll_refs + data.refs);
 	while (unlikely(ctx->rsrc_cached_refs < 0))
 		io_rsrc_refs_refill(ctx);
 }
-- 
2.30.2