From: Keith Busch <kbusch@xxxxxxxxxx>

Clean up the generic rw prep so that it no longer requires the
do_import flag. Use a different prep function for callers that might
need buffer select.

Based-on-a-patch-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Keith Busch <kbusch@xxxxxxxxxx>
---
 io_uring/rw.c | 45 ++++++++++++++++++++++++++++-----------------
 1 file changed, 28 insertions(+), 17 deletions(-)

diff --git a/io_uring/rw.c b/io_uring/rw.c
index 788f06fbd7db1..b21b423b3cf8f 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -248,8 +248,8 @@ static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir,
 	return ret;
 }
 
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-		      int ddir, bool do_import)
+static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+			int ddir)
 {
 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	unsigned ioprio;
@@ -285,14 +285,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	rw->len = READ_ONCE(sqe->len);
 	rw->flags = READ_ONCE(sqe->rw_flags);
 
-	if (do_import && !io_do_buffer_select(req)) {
-		struct io_async_rw *io = req->async_data;
-
-		ret = io_import_rw_buffer(ddir, req, io, 0);
-		if (unlikely(ret))
-			return ret;
-	}
-
 	attr_type_mask = READ_ONCE(sqe->attr_type_mask);
 	if (attr_type_mask) {
 		u64 attr_ptr;
@@ -307,26 +299,45 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return 0;
 }
 
+static int io_rw_do_import(struct io_kiocb *req, int ddir)
+{
+	if (io_do_buffer_select(req))
+		return 0;
+
+	return io_import_rw_buffer(ddir, req, req->async_data, 0);
+}
+
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+		      int ddir)
+{
+	int ret;
+
+	ret = __io_prep_rw(req, sqe, ddir);
+	if (unlikely(ret))
+		return ret;
+
+	return io_rw_do_import(req, ddir);
+}
+
 int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	return io_prep_rw(req, sqe, ITER_DEST, true);
+	return io_prep_rw(req, sqe, ITER_DEST);
 }
 
 int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	return io_prep_rw(req, sqe, ITER_SOURCE, true);
+	return io_prep_rw(req, sqe, ITER_SOURCE);
 }
 
 static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		       int ddir)
 {
-	const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT);
 	int ret;
 
-	ret = io_prep_rw(req, sqe, ddir, do_import);
+	ret = io_prep_rw(req, sqe, ddir);
 	if (unlikely(ret))
 		return ret;
-	if (do_import)
+	if (!(req->flags & REQ_F_BUFFER_SELECT))
 		return 0;
 
 	/*
@@ -353,7 +364,7 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	struct io_async_rw *io;
 	int ret;
 
-	ret = io_prep_rw(req, sqe, ddir, false);
+	ret = __io_prep_rw(req, sqe, ddir);
 	if (unlikely(ret))
 		return ret;
 
@@ -386,7 +397,7 @@ int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (!(req->flags & REQ_F_BUFFER_SELECT))
 		return -EINVAL;
 
-	ret = io_prep_rw(req, sqe, ITER_DEST, false);
+	ret = __io_prep_rw(req, sqe, ITER_DEST);
 	if (unlikely(ret))
 		return ret;
 
-- 
2.43.5
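
For readers following along outside the kernel tree, below is a minimal
standalone sketch of the refactoring pattern the diff applies: the
do_import flag parameter goes away in favor of a layered pair of prep
functions, where the core helper does only the common SQE parsing and a
thin wrapper adds the default buffer import. Every type and name in the
sketch is a simplified stand-in, not the real io_uring code.

	/*
	 * Standalone sketch, NOT kernel code: a core helper does the
	 * common prep work, a wrapper adds the default buffer import,
	 * and callers that handle buffers themselves call the core
	 * helper directly. All names and types are stand-ins.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct req {
		bool buffer_select;	/* stand-in for REQ_F_BUFFER_SELECT */
	};

	/* stand-in for io_import_rw_buffer() */
	static int import_buffer(struct req *r)
	{
		(void)r;
		printf("buffer imported at prep time\n");
		return 0;
	}

	/* common SQE parsing: was io_prep_rw() taking a do_import flag */
	static int __prep_rw(struct req *r)
	{
		(void)r;
		printf("common prep\n");
		return 0;
	}

	/* stand-in for io_rw_do_import(): defer if a buffer is selected later */
	static int prep_rw_do_import(struct req *r)
	{
		if (r->buffer_select)
			return 0;
		return import_buffer(r);
	}

	/* stand-in for the new io_prep_rw(): core prep plus default import */
	static int prep_rw(struct req *r)
	{
		int ret = __prep_rw(r);

		if (ret)
			return ret;
		return prep_rw_do_import(r);
	}

	int main(void)
	{
		struct req plain = { .buffer_select = false };
		struct req mshot = { .buffer_select = true };

		prep_rw(&plain);	/* ordinary read/write: prep + import */
		prep_rw(&mshot);	/* buffer select: import deferred */
		__prep_rw(&mshot);	/* fixed/mshot paths: core helper only */
		return 0;
	}

The point of the split is that the boolean argument disappears from
every call site: the choice between "import now" and "import later" is
encoded in which function the caller picks, mirroring the patch's
io_prep_rw() versus __io_prep_rw() distinction.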