There is a slight optimisation to be had by calculating the correct pos
pointer inside io_kiocb_update_pos and then using that later.

It seems code size drops by a bit:

000000000000a1b0 0000000000000400 t io_read
000000000000a5b0 0000000000000319 t io_write

vs

000000000000a1b0 00000000000003f6 t io_read
000000000000a5b0 0000000000000310 t io_write

Signed-off-by: Dylan Yudaken <dylany@xxxxxx>
---
 fs/io_uring.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 50b93ff2ee12..abd8c739988e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3066,17 +3066,21 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 	}
 }
 
-static inline void
+static inline loff_t*
 io_kiocb_update_pos(struct io_kiocb *req, struct kiocb *kiocb)
 {
+	bool is_stream = req->file->f_mode & FMODE_STREAM;
 	if (kiocb->ki_pos == -1) {
-		if (!(req->file->f_mode & FMODE_STREAM)) {
+		if (!is_stream) {
 			req->flags |= REQ_F_CUR_POS;
 			kiocb->ki_pos = req->file->f_pos;
+			return &kiocb->ki_pos;
 		} else {
 			kiocb->ki_pos = 0;
+			return NULL;
 		}
 	}
+	return is_stream ? NULL : &kiocb->ki_pos;
 }
 
 static void kiocb_done(struct io_kiocb *req, ssize_t ret,
@@ -3637,6 +3641,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 	struct io_async_rw *rw;
 	ssize_t ret, ret2;
+	loff_t *ppos;
 
 	if (!req_has_async_data(req)) {
 		ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
@@ -3667,9 +3672,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 		kiocb->ki_flags &= ~IOCB_NOWAIT;
 	}
 
-	io_kiocb_update_pos(req, kiocb);
+	ppos = io_kiocb_update_pos(req, kiocb);
 
-	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
+	ret = rw_verify_area(READ, req->file, ppos, req->result);
 	if (unlikely(ret)) {
 		kfree(iovec);
 		return ret;
@@ -3768,6 +3773,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	struct kiocb *kiocb = &req->rw.kiocb;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 	ssize_t ret, ret2;
+	loff_t *ppos;
 
 	if (!req_has_async_data(req)) {
 		ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
@@ -3798,9 +3804,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 		kiocb->ki_flags &= ~IOCB_NOWAIT;
 	}
 
-	io_kiocb_update_pos(req, kiocb);
+	ppos = io_kiocb_update_pos(req, kiocb);
 
-	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
+	ret = rw_verify_area(WRITE, req->file, ppos, req->result);
 	if (unlikely(ret))
 		goto out_free;
 
-- 
2.30.2
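
For anyone following the helper change outside the kernel tree, here is a
minimal userspace sketch of the contract the reworked io_kiocb_update_pos()
provides: NULL for stream files (which have no position) and a pointer to
kiocb->ki_pos otherwise, with a ki_pos of -1 replaced by the file's current
position. The struct layouts, constants, and main() driver are simplified
stand-ins for illustration only, not the real kernel definitions.

	/*
	 * Userspace sketch (not kernel code): simplified stand-ins for the
	 * kernel types, mirroring only the branch logic of the patched helper.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	typedef long long loff_t;

	#define FMODE_STREAM	0x1
	#define REQ_F_CUR_POS	0x1

	struct file {
		unsigned int f_mode;
		loff_t f_pos;
	};

	struct kiocb {
		loff_t ki_pos;
	};

	struct io_kiocb {
		struct file *file;
		unsigned int flags;
		struct kiocb rw_kiocb;	/* stand-in for req->rw.kiocb */
	};

	/* Returns the pos pointer to pass on to the read/write path, or NULL. */
	static loff_t *io_kiocb_update_pos(struct io_kiocb *req, struct kiocb *kiocb)
	{
		bool is_stream = req->file->f_mode & FMODE_STREAM;

		if (kiocb->ki_pos == -1) {
			if (!is_stream) {
				/* -1 means "use the file's current position". */
				req->flags |= REQ_F_CUR_POS;
				kiocb->ki_pos = req->file->f_pos;
				return &kiocb->ki_pos;
			}
			/* Streams have no position. */
			kiocb->ki_pos = 0;
			return NULL;
		}
		/* Explicit offset: streams still get NULL, regular files get &ki_pos. */
		return is_stream ? NULL : &kiocb->ki_pos;
	}

	int main(void)
	{
		struct file f = { .f_mode = 0, .f_pos = 4096 };
		struct io_kiocb req = { .file = &f, .rw_kiocb = { .ki_pos = -1 } };
		loff_t *ppos;

		/* Regular file with ki_pos == -1: picks up f_pos (4096). */
		ppos = io_kiocb_update_pos(&req, &req.rw_kiocb);
		printf("ppos=%p ki_pos=%lld\n", (void *)ppos, req.rw_kiocb.ki_pos);

		/* Stream file: the helper hands back NULL. */
		f.f_mode = FMODE_STREAM;
		req.rw_kiocb.ki_pos = -1;
		ppos = io_kiocb_update_pos(&req, &req.rw_kiocb);
		printf("ppos=%p ki_pos=%lld\n", (void *)ppos, req.rw_kiocb.ki_pos);
		return 0;
	}

The call sites in the patch then pass the returned pointer straight to
rw_verify_area() instead of recomputing it with io_kiocb_ppos().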