Most places that want to run local tw explicitly check in advance whether
they are allowed to do so. Don't rely on a similar check in
__io_run_local_work(); leave it as a just-in-case warning and make sure
callers check capabilities themselves.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 io_uring/io_uring.c | 15 ++++++---------
 io_uring/io_uring.h |  5 +++++
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index bf6f9777d165..3bb3e9889717 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1296,14 +1296,13 @@ int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked)
 	struct llist_node *node;
 	struct llist_node fake;
 	struct llist_node *current_final = NULL;
-	int ret;
+	int ret = 0;
 	unsigned int loops = 1;
 
-	if (unlikely(ctx->submitter_task != current))
+	if (WARN_ON_ONCE(ctx->submitter_task != current))
 		return -EEXIST;
 
 	node = io_llist_xchg(&ctx->work_llist, &fake);
-	ret = 0;
 again:
 	while (node != current_final) {
 		struct llist_node *next = node->next;
@@ -2511,11 +2510,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	if (!io_allowed_run_tw(ctx))
 		return -EEXIST;
 
-	if (!llist_empty(&ctx->work_llist)) {
-		ret = io_run_local_work(ctx);
-		if (ret < 0)
-			return ret;
-	}
+	if (!llist_empty(&ctx->work_llist))
+		io_run_local_work(ctx);
 	io_run_task_work();
 	io_cqring_overflow_flush(ctx);
 	/* if user messes with these they will just get an early return */
@@ -3052,7 +3048,8 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 		}
 	}
 
-	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
+	    io_allowed_defer_tw_run(ctx))
 		ret |= io_run_local_work(ctx) > 0;
 	ret |= io_cancel_defer_files(ctx, task, cancel_all);
 	mutex_lock(&ctx->uring_lock);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 8a5c3affd724..9b7baeff5a1c 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -352,6 +352,11 @@ static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 	return container_of(node, struct io_kiocb, comp_list);
 }
 
+static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
+{
+	return likely(ctx->submitter_task == current);
+}
+
 static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
 {
 	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
-- 
2.38.1
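
For illustration, a minimal sketch of the caller-side pattern this patch
enforces, built from the helpers touched above. The enclosing function is
hypothetical; only io_allowed_run_tw(), io_run_local_work() and the
work_llist check come from the patch itself:

	/*
	 * Hypothetical caller showing the intended contract: capability
	 * is checked up front, so the WARN_ON_ONCE() left inside
	 * __io_run_local_work() should never fire on this path.
	 */
	static int example_wait_path(struct io_ring_ctx *ctx)
	{
		/* callers verify they may run local task_work ... */
		if (!io_allowed_run_tw(ctx))
			return -EEXIST;

		/* ... and only then drain the deferred work list */
		if (!llist_empty(&ctx->work_llist))
			io_run_local_work(ctx);
		return 0;
	}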