Make TLS's device sendmsg() support MSG_SPLICE_PAGES.  This causes pages to
be spliced from the source iterator if possible.

This allows ->sendpage() to be replaced by something that can handle multiple
multipage folios in a single transaction.

Signed-off-by: David Howells <dhowells@xxxxxxxxxx>
cc: Chuck Lever <chuck.lever@xxxxxxxxxx>
cc: Boris Pismenny <borisp@xxxxxxxxxx>
cc: John Fastabend <john.fastabend@xxxxxxxxx>
cc: Jakub Kicinski <kuba@xxxxxxxxxx>
cc: Eric Dumazet <edumazet@xxxxxxxxxx>
cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
cc: Paolo Abeni <pabeni@xxxxxxxxxx>
cc: Jens Axboe <axboe@xxxxxxxxx>
cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
cc: netdev@xxxxxxxxxxxxxxx
---
 net/tls/tls_device.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 439be833dcf9..bb3bb523544e 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -509,6 +509,29 @@ static int tls_push_data(struct sock *sk,
 			tls_append_frag(record, &zc_pfrag, copy);
 			iter_offset.offset += copy;
+		} else if (copy && (flags & MSG_SPLICE_PAGES)) {
+			struct page_frag zc_pfrag;
+			struct page **pages = &zc_pfrag.page;
+			size_t off;
+
+			rc = iov_iter_extract_pages(iter_offset.msg_iter,
+						    &pages, copy, 1, 0, &off);
+			if (rc <= 0) {
+				if (rc == 0)
+					rc = -EIO;
+				goto handle_error;
+			}
+			copy = rc;
+
+			if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) {
+				iov_iter_revert(iter_offset.msg_iter, copy);
+				rc = -EIO;
+				goto handle_error;
+			}
+
+			zc_pfrag.offset = off;
+			zc_pfrag.size = copy;
+			tls_append_frag(record, &zc_pfrag, copy);
 		} else if (copy) {
 			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
@@ -572,6 +595,9 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	union tls_iter_offset iter;
 	int rc;
 
+	if (!tls_ctx->zerocopy_sendfile)
+		msg->msg_flags &= ~MSG_SPLICE_PAGES;
+
 	mutex_lock(&tls_ctx->tx_lock);
 	lock_sock(sk);
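
For context, MSG_SPLICE_PAGES is an in-kernel flag, so a caller reaches this
path through sock_sendmsg() with a bvec-backed iterator rather than through
->sendpage().  The sketch below is not part of the patch: the helper name is
hypothetical, and it assumes TLS device offload with zerocopy_sendfile is
already enabled on the socket.  It shows roughly how an in-kernel user might
splice a single page into such a socket:

#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Hypothetical in-kernel helper, for illustration only: hand one page to a
 * TLS-device socket via sendmsg(MSG_SPLICE_PAGES) instead of ->sendpage().
 */
static int splice_page_to_tls_sock(struct socket *sock, struct page *page,
				   unsigned int offset, unsigned int len)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };

	/* Describe the page as a single-segment bvec iterator. */
	bvec_set_page(&bvec, page, len, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);

	/* tls_push_data() extracts the page with iov_iter_extract_pages()
	 * and appends it to the open record instead of copying the data.
	 */
	return sock_sendmsg(sock, &msg);
}

Either way the data still lands in the record via tls_append_frag(); with
MSG_SPLICE_PAGES the fragment references the caller's page directly instead
of a bounce copy.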