This adds a transport callback and its logic for SEQPACKET dequeue.
The callback fetches RW packets from the socket's rx queue until the
whole record is copied (if the user's buffer is full, the user is not
woken up). This is done so as not to stall the sender: if we woke the
user up and it left the syscall, nobody would send a credit update for
the rest of the record, and the sender would have to wait until the
receiver entered the read syscall again. So if the user's buffer is
full, we just send a credit update and drop the data.

Signed-off-by: Arseny Krasnov <arseny.krasnov@xxxxxxxxxxxxx>
---
 v8 -> v9:
 1) Check for the RW packet type is removed from the loop (all packets
    are now considered RW).
 2) Locking in the loop is fixed.
 3) cpu_to_le32()/le32_to_cpu() are now used.
 4) MSG_TRUNC handling is removed from the transport.

 include/linux/virtio_vsock.h            |  5 ++
 net/vmw_vsock/virtio_transport_common.c | 64 +++++++++++++++++++++++++
 2 files changed, 69 insertions(+)

diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index dc636b727179..02acf6e9ae04 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -80,6 +80,11 @@ virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
 			       struct msghdr *msg,
 			       size_t len, int flags);
 
+ssize_t
+virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
+				   struct msghdr *msg,
+				   int flags,
+				   bool *msg_ready);
 s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
 s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
 
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index ad0d34d41444..f649a21dd23b 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -393,6 +393,58 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
 	return err;
 }
 
+static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
+						 struct msghdr *msg,
+						 int flags,
+						 bool *msg_ready)
+{
+	struct virtio_vsock_sock *vvs = vsk->trans;
+	struct virtio_vsock_pkt *pkt;
+	int err = 0;
+	size_t user_buf_len = msg->msg_iter.count;
+
+	*msg_ready = false;
+	spin_lock_bh(&vvs->rx_lock);
+
+	while (!*msg_ready && !list_empty(&vvs->rx_queue) && err >= 0) {
+		size_t bytes_to_copy;
+		size_t pkt_len;
+
+		pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);
+		pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
+		bytes_to_copy = min(user_buf_len, pkt_len);
+
+		if (bytes_to_copy) {
+			/* sk_lock is held by caller so no one else can dequeue.
+			 * Unlock rx_lock since memcpy_to_msg() may sleep.
+			 */
+			spin_unlock_bh(&vvs->rx_lock);
+
+			if (memcpy_to_msg(msg, pkt->buf, bytes_to_copy)) {
+				err = -EINVAL;
+			} else {
+				err += pkt_len;
+				user_buf_len -= bytes_to_copy;
+			}
+
+			spin_lock_bh(&vvs->rx_lock);
+		}
+
+		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
+			*msg_ready = true;
+
+		virtio_transport_dec_rx_pkt(vvs, pkt);
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+
+	spin_unlock_bh(&vvs->rx_lock);
+
+	virtio_transport_send_credit_update(vsk);
+
+	return err;
+}
+
 ssize_t
 virtio_transport_stream_dequeue(struct vsock_sock *vsk,
 				struct msghdr *msg,
@@ -405,6 +457,18 @@ virtio_transport_stream_dequeue(struct vsock_sock *vsk,
 }
 EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
 
+ssize_t
+virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
+				   struct msghdr *msg,
+				   int flags, bool *msg_ready)
+{
+	if (flags & MSG_PEEK)
+		return -EOPNOTSUPP;
+
+	return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags, msg_ready);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);
+
 int
 virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
 			       struct msghdr *msg,
-- 
2.25.1
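
For context, here is what the record-boundary semantics described above
look like from userspace. This is a minimal sketch, not part of the
patch: it assumes a kernel with AF_VSOCK SOCK_SEQPACKET support, a peer
that connects and sends one record larger than the receive buffer, and
a placeholder port number. One recv() consumes exactly one record;
whatever does not fit into the buffer is dropped by the transport
rather than left queued, and the credit update sent after the drop is
what keeps the sender from stalling. Note that MSG_PEEK returns
-EOPNOTSUPP with this patch.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr = {
		.svm_family = AF_VSOCK,
		.svm_cid = VMADDR_CID_ANY,	/* accept from any CID */
		.svm_port = 1234,		/* placeholder port */
	};
	char buf[128];				/* deliberately small buffer */
	ssize_t n;
	int fd, conn;

	fd = socket(AF_VSOCK, SOCK_SEQPACKET, 0);
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
	    listen(fd, 1))
		return 1;

	conn = accept(fd, NULL, NULL);
	if (conn < 0)
		return 1;

	/* One recv() consumes one whole record; bytes of the record that
	 * do not fit into buf are dropped by the transport, so the next
	 * recv() starts at the next record boundary.
	 */
	n = recv(conn, buf, sizeof(buf), 0);
	printf("recv() returned %zd\n", n);

	close(conn);
	close(fd);
	return 0;
}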