Replace queue_delayed_work() with mod_delayed_work() in
io_rsrc_node_ref_zero(), as the latter can also schedule a new work
item when none is pending, and clean the function up for better
readability.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5dfd33753471..f1a96988c3f5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7565,7 +7565,7 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
 	struct io_rsrc_data *data = node->rsrc_data;
 	struct io_ring_ctx *ctx = data->ctx;
 	bool first_add = false;
-	int delay = HZ;
+	int delay;
 
 	io_rsrc_ref_lock(ctx);
 	node->done = true;
@@ -7581,13 +7581,9 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
 	}
 	io_rsrc_ref_unlock(ctx);
 
-	if (percpu_ref_is_dying(&data->refs))
-		delay = 0;
-
-	if (!delay)
-		mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
-	else if (first_add)
-		queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
+	delay = percpu_ref_is_dying(&data->refs) ? 0 : HZ;
+	if (first_add || !delay)
+		mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
 }
 
 static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
-- 
2.24.0
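
For reviewers less familiar with the workqueue API, here is a minimal,
self-contained sketch (a hypothetical demo module, not part of the patch)
of the semantic difference the patch relies on: queue_delayed_work() is a
no-op while the work is already pending, whereas mod_delayed_work()
resets the timer of pending work and queues it otherwise, so a single
call can cover both the first_add and the dying-ref (expedite) cases:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct delayed_work demo_work;

static void demo_fn(struct work_struct *work)
{
	pr_info("demo_work ran\n");
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_fn);

	/* Queue the work to run in 10 seconds. */
	queue_delayed_work(system_wq, &demo_work, 10 * HZ);

	/*
	 * queue_delayed_work() does nothing while the work is still
	 * pending, so this call does NOT pull execution forward.
	 */
	queue_delayed_work(system_wq, &demo_work, 0);

	/*
	 * mod_delayed_work() resets the timer whether or not the work
	 * is pending: the work now runs immediately.
	 */
	mod_delayed_work(system_wq, &demo_work, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This is why the old code needed two call sites (queue for the first add,
mod to expedite a dying ref) while the new code folds them into one
mod_delayed_work() guarded by "first_add || !delay".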