This removes:
1) The check that ignored packets of non-stream type.

This adds:
1) Handling of the SEQPACKET feature bit: if the guest negotiates features
   with this bit cleared, SOCK_SEQPACKET support is disabled.
2) The 'seqpacket_allow()' callback.
3) Handling of the SEQ_EOR bit: when vhost places data into the buffers of
   the guest's rx queue, this bit is kept set only when the last piece of
   data is copied.

Signed-off-by: Arseny Krasnov <arseny.krasnov@xxxxxxxxxxxxx>
---
 v8 -> v9:
 1) Move 'seqpacket_allow' to 'struct vhost_vsock'.
 2) Use cpu_to_le32()/le32_to_cpu() to work with the 'flags' field of the packet.

 drivers/vhost/vsock.c | 42 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 39 insertions(+), 3 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 5e78fb719602..3395b25d4a35 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -31,7 +31,8 @@
 
 enum {
 	VHOST_VSOCK_FEATURES = VHOST_FEATURES |
-			       (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+			       (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
+			       (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
 };
 
 enum {
@@ -56,6 +57,7 @@ struct vhost_vsock {
 	atomic_t queued_replies;
 
 	u32 guest_cid;
+	bool seqpacket_allow;
 };
 
 static u32 vhost_transport_get_local_cid(void)
@@ -112,6 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		size_t nbytes;
 		size_t iov_len, payload_len;
 		int head;
+		bool restore_flag = false;
 
 		spin_lock_bh(&vsock->send_pkt_list_lock);
 		if (list_empty(&vsock->send_pkt_list)) {
@@ -174,6 +177,12 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		/* Set the correct length in the header */
 		pkt->hdr.len = cpu_to_le32(payload_len);
 
+		if (pkt->off + payload_len < pkt->len &&
+		    le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+			pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+			restore_flag = true;
+		}
+
 		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
 		if (nbytes != sizeof(pkt->hdr)) {
 			virtio_transport_free_pkt(pkt);
@@ -181,6 +190,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 			break;
 		}
 
+		if (restore_flag)
+			pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+
 		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
 				      &iov_iter);
 		if (nbytes != payload_len) {
@@ -354,8 +366,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
 		return NULL;
 	}
 
-	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
-		pkt->len = le32_to_cpu(pkt->hdr.len);
+	pkt->len = le32_to_cpu(pkt->hdr.len);
 
 	/* No payload */
 	if (!pkt->len)
@@ -398,6 +409,8 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
 	return val < vq->num;
 }
 
+static bool vhost_transport_seqpacket_allow(u32 remote_cid);
+
 static struct virtio_transport vhost_transport = {
 	.transport = {
 		.module                   = THIS_MODULE,
@@ -424,6 +437,10 @@ static struct virtio_transport vhost_transport = {
 		.stream_is_active         = virtio_transport_stream_is_active,
 		.stream_allow             = virtio_transport_stream_allow,
 
+		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
+		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
+		.seqpacket_allow          = vhost_transport_seqpacket_allow,
+
 		.notify_poll_in           = virtio_transport_notify_poll_in,
 		.notify_poll_out          = virtio_transport_notify_poll_out,
 		.notify_recv_init         = virtio_transport_notify_recv_init,
@@ -441,6 +458,22 @@ static struct virtio_transport vhost_transport = {
 	.send_pkt = vhost_transport_send_pkt,
 };
 
+static bool vhost_transport_seqpacket_allow(u32 remote_cid)
+{
+	struct vhost_vsock *vsock;
+	bool seqpacket_allow = false;
+
+	rcu_read_lock();
+	vsock = vhost_vsock_get(remote_cid);
+
+	if (vsock)
+		seqpacket_allow = vsock->seqpacket_allow;
+
+	rcu_read_unlock();
+
+	return seqpacket_allow;
+}
+
 static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
 {
 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -785,6 +818,9 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
 		goto err;
 	}
 
+	if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
+		vsock->seqpacket_allow = true;
+
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
 		vq = &vsock->vqs[i];
 		mutex_lock(&vq->mutex);
-- 
2.25.1
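
Not part of the patch itself: below is a minimal guest-side sketch of what
the negotiated feature enables, assuming the af_vsock core changes from the
rest of this series are applied. The port value is a placeholder, and the
point at which an unsupported SEQPACKET connection is rejected is an
assumption about the core code, not something taken from this patch.

/*
 * Hypothetical guest-side illustration: once VIRTIO_VSOCK_F_SEQPACKET is
 * negotiated, a SOCK_SEQPACKET vsock socket preserves message boundaries.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr = {
		.svm_family = AF_VSOCK,
		.svm_cid    = VMADDR_CID_HOST,	/* talk to the host (CID 2) */
		.svm_port   = 1234,		/* placeholder port */
	};
	const char msg[] = "one record";
	int fd;

	fd = socket(AF_VSOCK, SOCK_SEQPACKET, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/*
	 * Assumption: the af_vsock core refuses the connection when the
	 * assigned transport's seqpacket_allow() returns false, i.e. when
	 * the guest did not negotiate VIRTIO_VSOCK_F_SEQPACKET.
	 */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}

	/*
	 * Each send() is delivered as one record; the receiver observes the
	 * record boundary (SEQ_EOR) even if vhost has to split the payload
	 * across several rx buffers, which is exactly the case the
	 * restore_flag logic above handles.
	 */
	if (send(fd, msg, sizeof(msg) - 1, 0) < 0)
		perror("send");

	close(fd);
	return 0;
}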