Prepare for the upcoming removal of submission references by delaying
->async_data deallocation: hold off kfree()'ing it until io_issue_sqe()
returns.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index e7dabbe885b3..8ca9895535dd 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1771,6 +1771,7 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+	struct io_kiocb *req;
 	int ret, i;
 
 	BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
@@ -1796,8 +1797,18 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 		io_preinit_req(state->reqs[i], ctx);
 	state->free_reqs = ret;
 got_req:
-	state->free_reqs--;
-	return state->reqs[state->free_reqs];
+	req = state->reqs[--state->free_reqs];
+
+	/*
+	 * io_req_free(), dismantle() and co. don't free ->async_data; that's
+	 * needed to prevent io_issue_sqe() from kfree'ing the memory somewhere
+	 * deep down the stack and accessing it afterwards.
+	 */
+	if (req->async_data) {
+		kfree(req->async_data);
+		req->async_data = NULL;
+	}
+	return req;
 }
 
 static inline void io_put_file(struct file *file)
@@ -1816,10 +1827,6 @@ static void io_dismantle_req(struct io_kiocb *req)
 		io_put_file(req->file);
 	if (req->fixed_rsrc_refs)
 		percpu_ref_put(req->fixed_rsrc_refs);
-	if (req->async_data) {
-		kfree(req->async_data);
-		req->async_data = NULL;
-	}
 }
 
 static void __io_free_req(struct io_kiocb *req)
@@ -8614,6 +8621,8 @@ static void io_req_cache_free(struct list_head *list)
 
 	list_for_each_entry_safe(req, nxt, list, inflight_entry) {
 		list_del(&req->inflight_entry);
+		/* see comment in io_alloc_req() */
+		kfree(req->async_data);
 		kmem_cache_free(req_cachep, req);
 	}
 }
@@ -8621,9 +8630,16 @@ static void io_req_cache_free(struct list_head *list)
 static void io_req_caches_free(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
+	struct io_kiocb *req;
+	int i;
 
 	mutex_lock(&ctx->uring_lock);
 
+	/* see comment in io_alloc_req() */
+	for (i = 0; i < state->free_reqs; i++) {
+		req = state->reqs[i];
+		kfree(req->async_data);
+	}
 	if (state->free_reqs) {
 		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
 		state->free_reqs = 0;
-- 
2.32.0
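
The patch moves ->async_data reaping from the free path (io_dismantle_req())
to the allocation path and to cache teardown: memory that an opcode handler
may still touch while io_issue_sqe() unwinds stays valid until the request is
recycled. Below is a minimal userspace sketch of that lifetime pattern; all
names in it (struct req, alloc_req(), free_req(), caches_free()) are
hypothetical stand-ins, not io_uring's internals.

/*
 * Sketch only: free_req() recycles a request without touching ->async_data;
 * the stale pointer is reaped on the next allocation, or when the cache
 * itself is torn down.
 */
#include <stdio.h>
#include <stdlib.h>

#define CACHE_SIZE 8

struct req {
	void *async_data;	/* op-specific state, may outlive the issue path */
};

static struct req *cache[CACHE_SIZE];
static int free_reqs;

/* free_req(): recycle the request, deliberately NOT freeing ->async_data */
static void free_req(struct req *req)
{
	if (free_reqs < CACHE_SIZE) {
		cache[free_reqs++] = req;
	} else {
		free(req->async_data);	/* cache full, reap immediately */
		free(req);
	}
}

/* alloc_req(): reap stale ->async_data left over from the previous owner */
static struct req *alloc_req(void)
{
	struct req *req;

	if (free_reqs) {
		req = cache[--free_reqs];
		free(req->async_data);	/* the delayed deallocation happens here */
		req->async_data = NULL;
	} else {
		req = calloc(1, sizeof(*req));
	}
	return req;
}

/* caches_free(): teardown must reap whatever the cache still holds */
static void caches_free(void)
{
	while (free_reqs) {
		struct req *req = cache[--free_reqs];

		free(req->async_data);
		free(req);
	}
}

int main(void)
{
	struct req *req = alloc_req();

	req->async_data = malloc(64);	/* stand-in for deferred op state */
	free_req(req);			/* ->async_data intentionally survives */

	req = alloc_req();		/* same object back; stale data reaped */
	printf("async_data after recycle: %p\n", req->async_data);

	free_req(req);
	caches_free();
	return 0;
}

The trade-off is that a stale ->async_data pointer now sits in the cache
between uses, so every path that releases cached requests for good must reap
it as well, which is exactly what the io_req_cache_free() and
io_req_caches_free() hunks above add.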