As 'record' is not the same as 'message', rename current variables,
comments and defines from the 'record' concept to 'message'.

Signed-off-by: Arseny Krasnov <arseny.krasnov@xxxxxxxxxxxxx>
---
 drivers/vhost/vsock.c                   | 18 +++++++++---------
 net/vmw_vsock/virtio_transport_common.c | 14 +++++++-------
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index f249622ef11b..3b55de70ac77 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -114,7 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		size_t nbytes;
 		size_t iov_len, payload_len;
 		int head;
-		bool restore_flag = false;
+		bool restore_msg_eom_flag = false;
 
 		spin_lock_bh(&vsock->send_pkt_list_lock);
 		if (list_empty(&vsock->send_pkt_list)) {
@@ -178,16 +178,16 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 			 * small rx buffers, headers of packets in rx queue are
 			 * created dynamically and are initialized with header
 			 * of current packet(except length). But in case of
-			 * SOCK_SEQPACKET, we also must clear record delimeter
-			 * bit(VIRTIO_VSOCK_SEQ_EOR). Otherwise, instead of one
-			 * packet with delimeter(which marks end of record),
+			 * SOCK_SEQPACKET, we also must clear message delimeter
+			 * bit(VIRTIO_VSOCK_SEQ_EOM). Otherwise, instead of one
+			 * packet with delimeter(which marks end of message),
 			 * there will be sequence of packets with delimeter
 			 * bit set. After initialized header will be copied to
 			 * rx buffer, this bit will be restored.
 			 */
-			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
-				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
-				restore_flag = true;
+			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
+				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+				restore_msg_eom_flag = true;
 			}
 		}
 
@@ -224,8 +224,8 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 		 * to send it with the next available buffer.
 		 */
 		if (pkt->off < pkt->len) {
-			if (restore_flag)
-				pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+			if (restore_msg_eom_flag)
+				pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
 
 			/* We are queueing the same virtio_vsock_pkt to handle
 			 * the remaining bytes, and we want to deliver it
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 169ba8b72a63..dea564e4b482 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -77,7 +77,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
 
 		if (msg_data_left(info->msg) == 0 &&
 		    info->type == VIRTIO_VSOCK_TYPE_SEQPACKET)
-			pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+			pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
 	}
 
 	trace_virtio_transport_alloc_pkt(src_cid, src_port,
@@ -457,7 +457,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
 			dequeued_len += pkt_len;
 		}
 
-		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
 			msg_ready = true;
 			vvs->msg_count--;
 		}
@@ -1029,7 +1029,7 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
 		goto out;
 	}
 
-	if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)
+	if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)
 		vvs->msg_count++;
 
 	/* Try to copy small packets into the buffer of last packet queued,
@@ -1044,12 +1044,12 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
 
 		/* If there is space in the last packet queued, we copy the
 		 * new packet in its buffer. We avoid this if the last packet
-		 * queued has VIRTIO_VSOCK_SEQ_EOR set, because this is
-		 * delimiter of SEQPACKET record, so 'pkt' is the first packet
-		 * of a new record.
+		 * queued has VIRTIO_VSOCK_SEQ_EOM set, because this is
+		 * delimiter of SEQPACKET message, so 'pkt' is the first packet
+		 * of a new message.
 		 */
 		if ((pkt->len <= last_pkt->buf_len - last_pkt->len) &&
-		    !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR)) {
+		    !(le32_to_cpu(last_pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM)) {
 			memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
 			       pkt->len);
 			last_pkt->len += pkt->len;
-- 
2.25.1