We're going to have allocated bvecs shortly, so we need a place to store
them and infrastructure for releasing them. Reuse the struct
io_async_msghdr iovec caching for that. Untype the cached pointer and
track the size of the cached array in bytes instead of the number of iov
elements it can store. Performance-wise it should be just fine, as the
divisions will be compiled into binary shifts.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
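Not part of the patch, just a note for reviewers on the "divisions become
shifts" claim: sizeof(struct iovec) is a power of two on the common ABIs
(8 bytes on 32-bit, 16 bytes on 64-bit, a pointer plus a length), so the
division in io_kmsg_nr_free_iov() lowers to a right shift rather than a
real divide. A minimal userspace sketch of the same arithmetic, where
nr_free_iov() is only a stand-in for the kernel helper:

#include <stdio.h>
#include <sys/uio.h>

static int nr_free_iov(int free_vec_bytes)
{
	/* sizeof(struct iovec) is 16 on 64-bit: gcc/clang emit a shift */
	return free_vec_bytes / sizeof(struct iovec);
}

int main(void)
{
	/* a 128-byte cached array holds 8 iovecs on a 64-bit build */
	int bytes = 8 * sizeof(struct iovec);

	printf("%d iovecs\n", nr_free_iov(bytes));
	return 0;
}

Keeping the size in bytes (and the pointer as void *) is what lets the
same cached allocation be handed to the upcoming bvec users, which would
derive their capacity as free_vec_bytes / sizeof(struct bio_vec) instead.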
 io_uring/net.c | 67 +++++++++++++++++++++++++++-----------------------
 io_uring/net.h |  4 +--
 2 files changed, 38 insertions(+), 33 deletions(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index bd24290fa646..bc062b5a7a55 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -128,14 +128,19 @@ static bool io_net_retry(struct socket *sock, int flags)
 static inline void io_kmsg_set_iovec(struct io_async_msghdr *kmsg,
				     struct iovec *iov, int nr)
 {
-	kmsg->free_iov_nr = nr;
-	kmsg->free_iov = iov;
+	kmsg->free_vec_bytes = nr * sizeof(*iov);
+	kmsg->free_vec = iov;
+}
+
+static int io_kmsg_nr_free_iov(struct io_async_msghdr *kmsg)
+{
+	return kmsg->free_vec_bytes / sizeof(struct iovec);
 }
 
 static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
 {
-	if (kmsg->free_iov) {
-		kfree(kmsg->free_iov);
+	if (kmsg->free_vec) {
+		kfree(kmsg->free_vec);
 		io_kmsg_set_iovec(kmsg, NULL, 0);
 	}
 }
@@ -143,7 +148,7 @@ static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_async_msghdr *hdr = req->async_data;
-	struct iovec *iov;
+	void *vec;
 
 	/* can't recycle, ensure we free the iovec if we have one */
 	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
@@ -152,10 +157,10 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
 	}
 
 	/* Let normal cleanup path reap it if we fail adding to the cache */
-	iov = hdr->free_iov;
+	vec = hdr->free_vec;
 	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
-		if (iov)
-			kasan_mempool_poison_object(iov);
+		if (vec)
+			kasan_mempool_poison_object(vec);
 		req->async_data = NULL;
 		req->flags &= ~REQ_F_ASYNC_DATA;
 	}
@@ -168,9 +173,9 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
 
 	hdr = io_alloc_cache_get(&ctx->netmsg_cache);
 	if (hdr) {
-		if (hdr->free_iov) {
-			kasan_mempool_unpoison_object(hdr->free_iov,
-				hdr->free_iov_nr * sizeof(struct iovec));
+		if (hdr->free_vec) {
+			kasan_mempool_unpoison_object(hdr->free_vec,
+						      hdr->free_vec_bytes);
 			req->flags |= REQ_F_NEED_CLEANUP;
 		}
 		req->flags |= REQ_F_ASYNC_DATA;
@@ -192,8 +197,8 @@ static int io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
 {
 	if (iov) {
 		req->flags |= REQ_F_NEED_CLEANUP;
-		if (kmsg->free_iov)
-			kfree(kmsg->free_iov);
+		if (kmsg->free_vec)
+			kfree(kmsg->free_vec);
 		io_kmsg_set_iovec(kmsg, iov, kmsg->msg.msg_iter.nr_segs);
 	}
 	return 0;
@@ -220,9 +225,9 @@ static int io_compat_msg_copy_hdr(struct io_kiocb *req,
 	struct iovec *iov;
 	int ret, nr_segs;
 
-	if (iomsg->free_iov) {
-		nr_segs = iomsg->free_iov_nr;
-		iov = iomsg->free_iov;
+	if (iomsg->free_vec) {
+		nr_segs = io_kmsg_nr_free_iov(iomsg);
+		iov = iomsg->free_vec;
 	} else {
 		iov = &iomsg->fast_iov;
 		nr_segs = 1;
@@ -270,9 +275,9 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
 	struct iovec *iov;
 	int ret, nr_segs;
 
-	if (iomsg->free_iov) {
-		nr_segs = iomsg->free_iov_nr;
-		iov = iomsg->free_iov;
+	if (iomsg->free_vec) {
+		nr_segs = io_kmsg_nr_free_iov(iomsg);
+		iov = iomsg->free_vec;
 	} else {
 		iov = &iomsg->fast_iov;
 		nr_segs = 1;
@@ -478,7 +483,7 @@ static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
 	if (iter_is_ubuf(&kmsg->msg.msg_iter))
 		return 1;
 
-	iov = kmsg->free_iov;
+	iov = kmsg->free_vec;
 	if (!iov)
 		iov = &kmsg->fast_iov;
 
@@ -611,9 +616,9 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
 			.nr_iovs = 1,
 		};
 
-		if (kmsg->free_iov) {
-			arg.nr_iovs = kmsg->free_iov_nr;
-			arg.iovs = kmsg->free_iov;
+		if (kmsg->free_vec) {
+			arg.nr_iovs = io_kmsg_nr_free_iov(kmsg);
+			arg.iovs = kmsg->free_vec;
 			arg.mode = KBUF_MODE_FREE;
 		}
 
@@ -626,7 +631,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
 		if (unlikely(ret < 0))
 			return ret;
 
-		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
+		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_vec) {
 			io_kmsg_set_iovec(kmsg, arg.iovs, ret);
 			req->flags |= REQ_F_NEED_CLEANUP;
 		}
@@ -1088,9 +1093,9 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 			.mode = KBUF_MODE_EXPAND,
 		};
 
-		if (kmsg->free_iov) {
-			arg.nr_iovs = kmsg->free_iov_nr;
-			arg.iovs = kmsg->free_iov;
+		if (kmsg->free_vec) {
+			arg.nr_iovs = io_kmsg_nr_free_iov(kmsg);
+			arg.iovs = kmsg->free_vec;
 			arg.mode |= KBUF_MODE_FREE;
 		}
 
@@ -1109,7 +1114,7 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
 		}
 		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
				arg.out_len);
-		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
+		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_vec) {
 			io_kmsg_set_iovec(kmsg, arg.iovs, ret);
 			req->flags |= REQ_F_NEED_CLEANUP;
 		}
@@ -1807,9 +1812,9 @@ void io_netmsg_cache_free(const void *entry)
 {
 	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
 
-	if (kmsg->free_iov) {
-		kasan_mempool_unpoison_object(kmsg->free_iov,
-				kmsg->free_iov_nr * sizeof(struct iovec));
+	if (kmsg->free_vec) {
+		kasan_mempool_unpoison_object(kmsg->free_vec,
+					      kmsg->free_vec_bytes);
 		io_netmsg_iovec_free(kmsg);
 	}
 	kfree(kmsg);
diff --git a/io_uring/net.h b/io_uring/net.h
index 52bfee05f06a..65d497985572 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -7,8 +7,8 @@ struct io_async_msghdr {
 #if defined(CONFIG_NET)
	struct iovec			fast_iov;
	/* points to an allocated iov, if NULL we use fast_iov instead */
-	struct iovec			*free_iov;
-	int				free_iov_nr;
+	void				*free_vec;
+	int				free_vec_bytes;
	int				namelen;
	__kernel_size_t			controllen;
	__kernel_size_t			payloadlen;
-- 
2.46.0