Convert chtls_sendpage() to use sendmsg() with MSG_SPLICE_PAGES rather than
directly splicing in the pages itself.

This allows ->sendpage() to be replaced by something that can handle multiple
multipage folios in a single transaction.

Signed-off-by: David Howells <dhowells@xxxxxxxxxx>
cc: Ayush Sawal <ayush.sawal@xxxxxxxxxxx>
cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
cc: Eric Dumazet <edumazet@xxxxxxxxxx>
cc: Jakub Kicinski <kuba@xxxxxxxxxx>
cc: Paolo Abeni <pabeni@xxxxxxxxxx>
cc: Jens Axboe <axboe@xxxxxxxxx>
cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
cc: netdev@xxxxxxxxxxxxxxx
---
 .../chelsio/inline_crypto/chtls/chtls_io.c | 109 ++----------------
 1 file changed, 7 insertions(+), 102 deletions(-)

diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 1d08386ac916..65efd20ec796 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1240,110 +1240,15 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 int chtls_sendpage(struct sock *sk, struct page *page,
 		   int offset, size_t size, int flags)
 {
-	struct chtls_sock *csk;
-	struct chtls_dev *cdev;
-	int mss, err, copied;
-	struct tcp_sock *tp;
-	long timeo;
-
-	tp = tcp_sk(sk);
-	copied = 0;
-	csk = rcu_dereference_sk_user_data(sk);
-	cdev = csk->cdev;
-	lock_sock(sk);
-	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+	struct bio_vec bvec;
+	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
 
-	err = sk_stream_wait_connect(sk, &timeo);
-	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
-	    err != 0)
-		goto out_err;
-
-	mss = csk->mss;
-	csk_set_flag(csk, CSK_TX_MORE_DATA);
-
-	while (size > 0) {
-		struct sk_buff *skb = skb_peek_tail(&csk->txq);
-		int copy, i;
-
-		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
-		    (copy = mss - skb->len) <= 0) {
-new_buf:
-			if (!csk_mem_free(cdev, sk))
-				goto wait_for_sndbuf;
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		msg.msg_flags |= MSG_MORE;
 
-			if (is_tls_tx(csk)) {
-				skb = get_record_skb(sk,
-						     select_size(sk, size,
-								 flags,
-								 TX_TLSHDR_LEN),
-						     true);
-			} else {
-				skb = get_tx_skb(sk, 0);
-			}
-			if (!skb)
-				goto wait_for_memory;
-			copy = mss;
-		}
-		if (copy > size)
-			copy = size;
-
-		i = skb_shinfo(skb)->nr_frags;
-		if (skb_can_coalesce(skb, i, page, offset)) {
-			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
-		} else if (i < MAX_SKB_FRAGS) {
-			get_page(page);
-			skb_fill_page_desc(skb, i, page, offset, copy);
-		} else {
-			tx_skb_finalize(skb);
-			push_frames_if_head(sk);
-			goto new_buf;
-		}
-
-		skb->len += copy;
-		if (skb->len == mss)
-			tx_skb_finalize(skb);
-		skb->data_len += copy;
-		skb->truesize += copy;
-		sk->sk_wmem_queued += copy;
-		tp->write_seq += copy;
-		copied += copy;
-		offset += copy;
-		size -= copy;
-
-		if (corked(tp, flags) &&
-		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
-			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;
-
-		if (!size)
-			break;
-
-		if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
-			push_frames_if_head(sk);
-		continue;
-wait_for_sndbuf:
-		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
-		err = csk_wait_memory(cdev, sk, &timeo);
-		if (err)
-			goto do_error;
-	}
-out:
-	csk_reset_flag(csk, CSK_TX_MORE_DATA);
-	if (copied)
-		chtls_tcp_push(sk, flags);
-done:
-	release_sock(sk);
-	return copied;
-
-do_error:
-	if (copied)
-		goto out;
-
-out_err:
-	if (csk_conn_inline(csk))
-		csk_reset_flag(csk, CSK_TX_MORE_DATA);
-	copied = sk_stream_error(sk, flags, err);
-	goto done;
+	bvec_set_page(&bvec, page, size, offset);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+	return chtls_sendmsg(sk, &msg, size);
 }
 
 static void chtls_select_window(struct sock *sk)
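
For context (not part of the patch): the same MSG_SPLICE_PAGES pattern can be used by any
kernel caller that previously pushed a page with kernel_sendpage().  The sketch below is
illustrative only; the helper name example_splice_page() is hypothetical, and it assumes a
kernel that already provides bvec_set_page(), iov_iter_bvec() and the MSG_SPLICE_PAGES flag.

    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/uio.h>
    #include <linux/bvec.h>

    /* Hypothetical helper: send one page fragment via sendmsg() with
     * MSG_SPLICE_PAGES instead of kernel_sendpage().
     */
    static int example_splice_page(struct socket *sock, struct page *page,
                                   int offset, size_t size, bool more)
    {
            struct bio_vec bvec;
            struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };

            if (more)
                    msg.msg_flags |= MSG_MORE;      /* caller has more data queued */

            /* Describe the page fragment and hand it to the protocol's
             * sendmsg() as a BVEC-backed iterator, so the page can be
             * spliced into the socket rather than copied.
             */
            bvec_set_page(&bvec, page, size, offset);
            iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

            return sock_sendmsg(sock, &msg);
    }

As in the patch itself, a sendpage-style MSG_SENDPAGE_NOTLAST hint maps onto MSG_MORE on
the sendmsg() path.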