Put the REQ_F_INFLIGHT check together with the slow path
REQ_F_NEED_CLEANUP/etc. checks. Also, don't reload req->flags for every
check but cache it in a local variable.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index d50d0e98639b..c4ebdf1f759f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1670,24 +1670,28 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
 
 static void io_dismantle_req(struct io_kiocb *req)
 {
-	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
-		io_clean_op(req);
-	if (req->async_data)
-		kfree(req->async_data);
+	unsigned int flags = req->flags;
+
 	if (req->file)
-		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
-	if (req->fixed_rsrc_refs)
-		percpu_ref_put(req->fixed_rsrc_refs);
+		io_put_file(req, req->file, (flags & REQ_F_FIXED_FILE));
+	if (flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
+		     REQ_F_INFLIGHT)) {
+		io_clean_op(req);
 
-	if (req->flags & REQ_F_INFLIGHT) {
-		struct io_ring_ctx *ctx = req->ctx;
-		unsigned long flags;
+		if (req->flags & REQ_F_INFLIGHT) {
+			struct io_ring_ctx *ctx = req->ctx;
+			unsigned long flags;
 
-		spin_lock_irqsave(&ctx->inflight_lock, flags);
-		list_del(&req->inflight_entry);
-		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
-		req->flags &= ~REQ_F_INFLIGHT;
+			spin_lock_irqsave(&ctx->inflight_lock, flags);
+			list_del(&req->inflight_entry);
+			spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+			req->flags &= ~REQ_F_INFLIGHT;
+		}
 	}
+	if (req->fixed_rsrc_refs)
+		percpu_ref_put(req->fixed_rsrc_refs);
+	if (req->async_data)
+		kfree(req->async_data);
 }
 
 /* must to be called somewhat shortly after putting a request */
-- 
2.24.0
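
For reference, a sketch of how io_dismantle_req() reads with this patch
applied. The comments are annotations added here, not part of the patch,
and all types, helpers and REQ_F_* flags are the kernel-internal
definitions from fs/io_uring.c:

static void io_dismantle_req(struct io_kiocb *req)
{
	/* read req->flags once and reuse the cached copy below */
	unsigned int flags = req->flags;

	if (req->file)
		io_put_file(req, req->file, (flags & REQ_F_FIXED_FILE));
	/* all of the slow path work sits behind a single flags test */
	if (flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
		     REQ_F_INFLIGHT)) {
		io_clean_op(req);

		if (req->flags & REQ_F_INFLIGHT) {
			struct io_ring_ctx *ctx = req->ctx;
			unsigned long flags;	/* irqsave flags, shadows the cached copy */

			spin_lock_irqsave(&ctx->inflight_lock, flags);
			list_del(&req->inflight_entry);
			spin_unlock_irqrestore(&ctx->inflight_lock, flags);
			req->flags &= ~REQ_F_INFLIGHT;
		}
	}
	if (req->fixed_rsrc_refs)
		percpu_ref_put(req->fixed_rsrc_refs);
	if (req->async_data)
		kfree(req->async_data);
}

In the common case none of the three slow path bits are set, so the whole
block is skipped with one test and the function only puts the file, drops
the fixed rsrc reference and frees any async data.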