On Sun, Mar 07, 2021 at 09:03:09PM +0300, Arseny Krasnov wrote:
> This adds rest of logic for SEQPACKET:
> 1) SEQPACKET specific functions which send SEQ_BEGIN/SEQ_END. Note that
>    both functions may sleep to wait enough space for SEQPACKET header.
> 2) SEQ_BEGIN/SEQ_END in TAP packet capture.
> 3) Send SHUTDOWN on socket close for SEQPACKET type.
> 4) Set SEQPACKET packet type during send.
> 5) Set MSG_EOR in flags for SEQPACKET during send.
> 6) 'seqpacket_allow' flag to virtio transport.
>
> Signed-off-by: Arseny Krasnov <arseny.krasnov@xxxxxxxxxxxxx>
> ---
>  include/linux/virtio_vsock.h            |  8 +++
>  net/vmw_vsock/virtio_transport_common.c | 87 ++++++++++++++++++++++++-
>  2 files changed, 93 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
> index d7edcfeb4cd2..6b45a8b98226 100644
> --- a/include/linux/virtio_vsock.h
> +++ b/include/linux/virtio_vsock.h
> @@ -22,6 +22,7 @@ struct virtio_vsock_seqpack_state {
>  	u32 user_read_seq_len;
>  	u32 user_read_copied;
>  	u32 curr_rx_msg_id;
> +	u32 next_tx_msg_id;
>  };
>  
>  /* Per-socket state (accessed via vsk->trans) */
> @@ -76,6 +77,8 @@ struct virtio_transport {
>  
>  	/* Takes ownership of the packet */
>  	int (*send_pkt)(struct virtio_vsock_pkt *pkt);
> +
> +	bool seqpacket_allow;
>  };
>  
>  ssize_t
> @@ -90,6 +93,11 @@ virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
>  
>  size_t virtio_transport_seqpacket_seq_get_len(struct vsock_sock *vsk);
>  int
> +virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
> +				   struct msghdr *msg,
> +				   int flags,
> +				   size_t len);
> +int
>  virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
>  				   struct msghdr *msg,
>  				   int flags,
> diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
> index 9d86375935ce..8e9fdd8aba5d 100644
> --- a/net/vmw_vsock/virtio_transport_common.c
> +++ b/net/vmw_vsock/virtio_transport_common.c
> @@ -139,6 +139,8 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
>  		break;
>  	case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
>  	case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
> +	case VIRTIO_VSOCK_OP_SEQ_BEGIN:
> +	case VIRTIO_VSOCK_OP_SEQ_END:
>  		hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
>  		break;
>  	default:
> @@ -187,7 +189,12 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
>  	struct virtio_vsock_pkt *pkt;
>  	u32 pkt_len = info->pkt_len;
>  
> -	info->type = VIRTIO_VSOCK_TYPE_STREAM;
> +	info->type = virtio_transport_get_type(sk_vsock(vsk));
> +
> +	if (info->type == VIRTIO_VSOCK_TYPE_SEQPACKET &&
> +	    info->msg &&
> +	    info->msg->msg_flags & MSG_EOR)
> +		info->flags |= VIRTIO_VSOCK_RW_EOR;
>  
>  	t_ops = virtio_transport_get_ops(vsk);
>  	if (unlikely(!t_ops))
> @@ -401,6 +408,43 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
>  	return err;
>  }
>  
> +static int virtio_transport_seqpacket_send_ctrl(struct vsock_sock *vsk,
> +						int type,
> +						size_t len,
> +						int flags)
> +{
> +	struct virtio_vsock_sock *vvs = vsk->trans;
> +	struct virtio_vsock_pkt_info info = {
> +		.op = type,
> +		.vsk = vsk,
> +		.pkt_len = sizeof(struct virtio_vsock_seq_hdr)
> +	};
> +
> +	struct virtio_vsock_seq_hdr seq_hdr = {
> +		.msg_id = cpu_to_le32(vvs->seqpacket_state.next_tx_msg_id),
> +		.msg_len = cpu_to_le32(len)
> +	};
> +
> +	struct kvec seq_hdr_kiov = {
> +		.iov_base = (void *)&seq_hdr,
> +		.iov_len = sizeof(struct virtio_vsock_seq_hdr)
> +	};
> +
> +	struct msghdr msg = {0};
> +
> +	//XXX: do we need 'vsock_transport_send_notify_data' pointer?
> +	if (vsock_wait_space(sk_vsock(vsk),
> +			     sizeof(struct virtio_vsock_seq_hdr),
> +			     flags, NULL))
> +		return -1;
> +
> +	iov_iter_kvec(&msg.msg_iter, WRITE, &seq_hdr_kiov, 1, sizeof(seq_hdr));
> +
> +	info.msg = &msg;
> +
> +	return virtio_transport_send_pkt_info(vsk, &info);
> +}
> +
>  static inline void virtio_transport_remove_pkt(struct virtio_vsock_pkt *pkt)
>  {
>  	list_del(&pkt->list);
> @@ -582,6 +626,45 @@ virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
>  }
>  EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);
>  
> +int
> +virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
> +				   struct msghdr *msg,
> +				   int flags,
> +				   size_t len)
> +{
> +	int written;
> +
> +	if (msg->msg_iter.iov_offset == 0) {
> +		/* Send SEQBEGIN. */
> +		if (virtio_transport_seqpacket_send_ctrl(vsk,
> +							 VIRTIO_VSOCK_OP_SEQ_BEGIN,
> +							 len,
> +							 flags) < 0)
> +			return -1;
> +	}
> +
> +	written = virtio_transport_stream_enqueue(vsk, msg, len);
> +
> +	if (written < 0)
> +		return -1;
> +
> +	if (msg->msg_iter.count == 0) {
> +		struct virtio_vsock_sock *vvs = vsk->trans;
> +
> +		/* Send SEQEND. */
> +		if (virtio_transport_seqpacket_send_ctrl(vsk,
> +							 VIRTIO_VSOCK_OP_SEQ_END,
> +							 0,
> +							 flags) < 0)
> +			return -1;
> +
> +		vvs->seqpacket_state.next_tx_msg_id++;
> +	}
I suspect we should increment next_tx_msg_id even in case of an error, to avoid issues with packets carrying the same ID, so in the error paths I would do:
	if (/* error */) {
		written = -1;
		goto out;
	}

Then we can add the 'out' label and the ID increment:

out:
	vvs->seqpacket_state.next_tx_msg_id++;
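To make it concrete, the tail of the function could end up looking roughly like this (just an untested sketch of the flow I have in mind, using the helpers from this patch; note that a successful partial write still returns early, so the ID is bumped exactly once per message, or on error):

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags,
				   size_t len)
{
	struct virtio_vsock_sock *vvs = vsk->trans;
	int written;

	if (msg->msg_iter.iov_offset == 0) {
		/* Send SEQBEGIN. */
		if (virtio_transport_seqpacket_send_ctrl(vsk,
							 VIRTIO_VSOCK_OP_SEQ_BEGIN,
							 len,
							 flags) < 0) {
			written = -1;
			goto out;
		}
	}

	written = virtio_transport_stream_enqueue(vsk, msg, len);
	if (written < 0) {
		written = -1;
		goto out;
	}

	/* Message not fully sent yet: we will be called again with the
	 * same msg, so keep the current ID.
	 */
	if (msg->msg_iter.count != 0)
		return written;

	/* Send SEQEND. */
	if (virtio_transport_seqpacket_send_ctrl(vsk,
						 VIRTIO_VSOCK_OP_SEQ_END,
						 0,
						 flags) < 0)
		written = -1;

out:
	vvs->seqpacket_state.next_tx_msg_id++;

	return written;
}

(vvs is moved to the top of the function since the increment now happens outside the 'count == 0' block.)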
> +
> +	return written;
> +}
> +EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue);
> +
>  int
>  virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
>  			       struct msghdr *msg,
> @@ -1001,7 +1084,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
>  	struct sock *sk = &vsk->sk;
>  	bool remove_sock = true;
>  
> -	if (sk->sk_type == SOCK_STREAM)
> +	if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
>  		remove_sock = virtio_transport_close(vsk);
>  
>  	list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
> -- 
> 2.25.1
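One last note, mostly to check my understanding of how this is exercised from userspace: with this patch a single send() on an AF_VSOCK SOCK_SEQPACKET socket should be framed on the wire as SEQ_BEGIN(msg_id, msg_len), then the payload as OP_RW packets (with VIRTIO_VSOCK_RW_EOR set when the caller passed MSG_EOR, per the hunk in virtio_transport_send_pkt_info() above), then SEQ_END with the same msg_id. Something like:

/* Example only, not part of the patch; send_one_message() and the
 * cid/port arguments are placeholders.
 */
#include <sys/socket.h>
#include <unistd.h>
#include <linux/vm_sockets.h>

static int send_one_message(unsigned int cid, unsigned int port,
			    const void *buf, size_t len)
{
	struct sockaddr_vm addr = {
		.svm_family = AF_VSOCK,
		.svm_cid = cid,
		.svm_port = port,
	};
	int fd = socket(AF_VSOCK, SOCK_SEQPACKET, 0);

	if (fd < 0)
		return -1;

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}

	/* MSG_EOR here is what becomes VIRTIO_VSOCK_RW_EOR above. */
	return send(fd, buf, len, MSG_EOR);
}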