Add support for registered buffers to send() and recv(). Done by exploiting the last bit of send/recv flags, IO_MSG_FIXED, which is cleared before going into the net stack. Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx> --- fs/io_uring.c | 39 +++++++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 7703291617f3..390495170fb0 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -104,6 +104,8 @@ #define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \ IORING_REGISTER_LAST + IORING_OP_LAST) +#define IO_MSG_FIXED (1U << 31) + struct io_uring { u32 head ____cacheline_aligned_in_smp; u32 tail ____cacheline_aligned_in_smp; @@ -4689,18 +4691,25 @@ static int io_send(struct io_kiocb *req, bool force_nonblock, struct io_comp_state *cs) { struct io_sr_msg *sr = &req->sr_msg; + unsigned int flags = sr->msg_flags; struct msghdr msg; struct iovec iov; struct socket *sock; - unsigned flags; int ret; sock = sock_from_file(req->file, &ret); if (unlikely(!sock)) return ret; - ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter); - if (unlikely(ret)) + if (flags & IO_MSG_FIXED) { + ret = io_import_fixed(req, WRITE, (u64)sr->buf, sr->len, + &msg.msg_iter); + flags &= ~IO_MSG_FIXED; + } else { + ret = import_single_range(WRITE, sr->buf, sr->len, &iov, + &msg.msg_iter); + } + if (unlikely(ret < 0)) return ret; msg.msg_name = NULL; @@ -4708,7 +4717,6 @@ static int io_send(struct io_kiocb *req, bool force_nonblock, msg.msg_controllen = 0; msg.msg_namelen = 0; - flags = req->sr_msg.msg_flags; if (flags & MSG_DONTWAIT) req->flags |= REQ_F_NOWAIT; else if (force_nonblock) @@ -4821,15 +4829,22 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock, if (unlikely(!sock)) return ret; - if (req->flags & REQ_F_BUFFER_SELECT) { - kbuf = io_recv_buffer_select(req, !force_nonblock); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - buf = u64_to_user_ptr(kbuf->addr); - } + if (flags & 
IO_MSG_FIXED) { + ret = io_import_fixed(req, READ, (u64)buf, sr->len, + &msg.msg_iter); + flags &= ~IO_MSG_FIXED; + } else { + if (req->flags & REQ_F_BUFFER_SELECT) { + kbuf = io_recv_buffer_select(req, !force_nonblock); + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); + buf = u64_to_user_ptr(kbuf->addr); + } - ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter); - if (unlikely(ret)) + ret = import_single_range(READ, buf, sr->len, &iov, + &msg.msg_iter); + } + if (unlikely(ret < 0)) goto out_free; msg.msg_name = NULL; -- 2.24.0