After io_req_complete(), per-op data is no longer needed, so reuse
its space to keep a list entry for struct io_comp_state, cleaning up
the request beforehand. Though useless by itself, this is preparation
for compacting io_kiocb.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8482b9aed952..2316e6b840b3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -487,6 +487,12 @@ struct io_statx {
 	struct statx __user		*buffer;
 };
 
+/* Safe to use only after *fill_event() and properly cleaning per-op data. */
+struct io_completion {
+	struct file			*file;
+	struct list_head		list;
+};
+
 struct io_async_connect {
 	struct sockaddr_storage		address;
 };
@@ -622,6 +628,7 @@ struct io_kiocb {
 		struct io_splice	splice;
 		struct io_provide_buf	pbuf;
 		struct io_statx		statx;
+		struct io_completion	compl;
 	};
 
 	struct io_async_ctx		*io;
@@ -1410,8 +1417,8 @@ static void io_submit_flush_completions(struct io_comp_state *cs)
 	while (!list_empty(&cs->list)) {
 		struct io_kiocb *req;
 
-		req = list_first_entry(&cs->list, struct io_kiocb, list);
-		list_del(&req->list);
+		req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
+		list_del(&req->compl.list);
 		__io_cqring_fill_event(req, req->result, req->cflags);
 		if (!(req->flags & REQ_F_LINK_HEAD)) {
 			req->flags |= REQ_F_COMP_LOCKED;
@@ -1436,9 +1443,12 @@ static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
 		io_cqring_add_event(req, res, cflags);
 		io_put_req(req);
 	} else {
+		if (req->flags & REQ_F_NEED_CLEANUP)
+			io_cleanup_req(req);
+
 		req->result = res;
 		req->cflags = cflags;
-		list_add_tail(&req->list, &cs->list);
+		list_add_tail(&req->compl.list, &cs->list);
 		if (++cs->nr >= 32)
 			io_submit_flush_completions(cs);
 	}
-- 
2.24.0
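
P.S. For anyone skimming, below is a minimal userspace sketch of the
space-reuse trick, not the kernel code: the sketch_* names are
hypothetical. Once the CQE values are saved into req->result and
req->cflags and per-op state has been cleaned up, the per-op union slot
can be retyped as a completion-list node, so the batching list costs the
request no extra bytes.

/*
 * Hypothetical userspace sketch; sketch_rw, sketch_compl and
 * sketch_req are illustrative stand-ins, not kernel types.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* stand-in for one per-op payload, live only while the op runs */
struct sketch_rw {
	void		*file;
	unsigned long	addr;
	unsigned long	len;
};

/* stand-in for io_completion, usable only after the result is saved */
struct sketch_compl {
	void			*file;
	struct list_head	list;
};

struct sketch_req {
	union {				/* one slot, two lifetimes */
		struct sketch_rw	rw;
		struct sketch_compl	compl;
	};
	long	result;			/* saved before the slot is reused */
};

int main(void)
{
	/* the union is as big as its largest member, not the sum */
	printf("req payload: %zu bytes (rw %zu, compl %zu)\n",
	       sizeof(struct sketch_req), sizeof(struct sketch_rw),
	       sizeof(struct sketch_compl));
	return 0;
}

Presumably this is also why struct io_completion keeps struct file *file
as its first member: the per-op structs in the union lead with the file
pointer, so that field stays valid across the retyping.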