We manage these separately right now, just tie it to the request lifetime and make it part of the usual REQ_F_NEED_CLEANUP logic. Signed-off-by: Jens Axboe <axboe@xxxxxxxxx> --- fs/io_uring.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index a668d6a3319c..2ea909ed2f49 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5029,9 +5029,6 @@ static void io_async_task_func(struct callback_head *cb) __io_req_task_submit(req); else io_req_complete_failed(req, -ECANCELED); - - kfree(apoll->double_poll); - kfree(apoll); } static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync, @@ -5147,8 +5144,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req) if (ret || ipt.error) { io_poll_remove_double(req); spin_unlock_irq(&ctx->completion_lock); - kfree(apoll->double_poll); - kfree(apoll); return false; } spin_unlock_irq(&ctx->completion_lock); @@ -5186,12 +5181,8 @@ static bool io_poll_remove_waitqs(struct io_kiocb *req) do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true); if (req->opcode != IORING_OP_POLL_ADD && do_complete) { - struct async_poll *apoll = req->apoll; - /* non-poll requests have submit ref still */ req_ref_put(req); - kfree(apoll->double_poll); - kfree(apoll); } return do_complete; } @@ -5990,7 +5981,8 @@ static int io_req_defer(struct io_kiocb *req) static void io_clean_op(struct io_kiocb *req) { - if (!(req->flags & (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP))) + if (!(req->flags & (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | + REQ_F_POLLED))) return; if (req->flags & REQ_F_BUFFER_SELECTED) { switch (req->opcode) { @@ -6047,6 +6039,11 @@ static void io_clean_op(struct io_kiocb *req) } req->flags &= ~REQ_F_NEED_CLEANUP; } + if ((req->flags & REQ_F_POLLED) && req->apoll) { + kfree(req->apoll->double_poll); + kfree(req->apoll); + req->apoll = NULL; + } } static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) -- 2.31.1