All opcode handlers pretty well know whether they need async data or not,
and can skip testing for needs_async_data. The exception is the generic
rw path, but it tests the flag by hand anyway. So, leave the check to the
callers and make io_alloc_async_data() allocate unconditionally.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index b3585226bfb5..8f25371ae904 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3105,21 +3105,13 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 	}
 }
 
-static inline int __io_alloc_async_data(struct io_kiocb *req)
+static inline int io_alloc_async_data(struct io_kiocb *req)
 {
 	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
 	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
 	return req->async_data == NULL;
 }
 
-static int io_alloc_async_data(struct io_kiocb *req)
-{
-	if (!io_op_defs[req->opcode].needs_async_data)
-		return 0;
-
-	return __io_alloc_async_data(req);
-}
-
 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 			     const struct iovec *fast_iov,
 			     struct iov_iter *iter, bool force)
@@ -3127,7 +3119,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 	if (!force && !io_op_defs[req->opcode].needs_async_data)
 		return 0;
 	if (!req->async_data) {
-		if (__io_alloc_async_data(req)) {
+		if (io_alloc_async_data(req)) {
 			kfree(iovec);
 			return -ENOMEM;
 		}
@@ -5814,7 +5806,7 @@ static int io_req_defer_prep(struct io_kiocb *req)
 	/* some opcodes init it during the inital prep */
 	if (req->async_data)
 		return 0;
-	if (__io_alloc_async_data(req))
+	if (io_alloc_async_data(req))
 		return -EAGAIN;
 	return io_req_prep_async(req);
 }
-- 
2.24.0
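
For illustration, a rough user-space sketch of the resulting pattern follows;
struct request, op_defs[], alloc_async_data() and setup_async() below are
simplified stand-ins for the io_uring internals touched above, not the kernel
definitions. The helper now allocates unconditionally and complains if no
async_size is registered, while a caller that may not always need async data
keeps checking needs_async_data itself, as io_setup_async_rw() does.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* Simplified stand-ins for io_op_defs[] and io_kiocb (hypothetical). */
struct op_def {
	bool needs_async_data;
	size_t async_size;
};

struct request {
	int opcode;
	void *async_data;
};

static const struct op_def op_defs[] = {
	[0] = { .needs_async_data = false, .async_size = 0  },	/* e.g. a nop */
	[1] = { .needs_async_data = true,  .async_size = 64 },	/* e.g. a rw op */
};

/* After the patch: allocate unconditionally, complain if no size is set. */
static inline int alloc_async_data(struct request *req)
{
	assert(op_defs[req->opcode].async_size);	/* stands in for WARN_ON_ONCE() */
	req->async_data = malloc(op_defs[req->opcode].async_size);
	return req->async_data == NULL;
}

/* A generic caller still tests the flag by hand, like io_setup_async_rw(). */
static int setup_async(struct request *req, bool force)
{
	if (!force && !op_defs[req->opcode].needs_async_data)
		return 0;
	if (!req->async_data && alloc_async_data(req))
		return -1;	/* -ENOMEM in the kernel */
	return 0;
}

int main(void)
{
	struct request req = { .opcode = 1, .async_data = NULL };
	int ret = setup_async(&req, false);

	free(req.async_data);
	return ret;
}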