Batch sending of rx buffers can improve total bandwidth.

Instead of adding used buffers and signalling the guest for every
packet, accumulate the used heads and flush them with a single
vhost_add_used_and_signal_n() call once more than VHOST_VSOCK_BATCH
(16) buffers have been queued, with a final flush before leaving the
send loop.

Signed-off-by: Yiwen Jiang <jiangyiwen@xxxxxxxxxx>
---
 drivers/vhost/vsock.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 9600133..a4bf0a1 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -151,9 +151,11 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
 			    struct vhost_virtqueue *vq)
 {
 	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
-	bool added = false;
 	bool restart_tx = false;
 	size_t vsock_hlen;
+	int batch_count = 0;
+
+#define VHOST_VSOCK_BATCH 16
 
 	mutex_lock(&vq->mutex);
 
@@ -194,8 +196,9 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
 		list_del_init(&pkt->list);
 		spin_unlock_bh(&vsock->send_pkt_list_lock);
 
-		headcount = get_rx_bufs(vq, vq->heads, vsock_hlen + pkt->len,
-				&in, likely(vsock->mergeable) ? UIO_MAXIOV : 1);
+		headcount = get_rx_bufs(vq, vq->heads + batch_count,
+				vsock_hlen + pkt->len, &in,
+				likely(vsock->mergeable) ? UIO_MAXIOV : 1);
 		if (headcount <= 0) {
 			spin_lock_bh(&vsock->send_pkt_list_lock);
 			list_add(&pkt->list, &vsock->send_pkt_list);
@@ -238,8 +241,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
 			remain_len -= tmp_len;
 		}
 
-		vhost_add_used_n(vq, vq->heads, headcount);
-		added = true;
+		batch_count += headcount;
+		if (batch_count > VHOST_VSOCK_BATCH) {
+			vhost_add_used_and_signal_n(&vsock->dev, vq,
+					vq->heads, batch_count);
+			batch_count = 0;
+		}
 
 		if (pkt->reply) {
 			int val;
@@ -258,8 +265,11 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
 		virtio_transport_free_pkt(pkt);
 	}
-	if (added)
-		vhost_signal(&vsock->dev, vq);
+
+	if (batch_count) {
+		vhost_add_used_and_signal_n(&vsock->dev, vq,
+				vq->heads, batch_count);
+	}
 
 out:
 	mutex_unlock(&vq->mutex);
 
-- 
1.8.3.1
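
For readers unfamiliar with the pattern, the accumulate-and-flush idea above
can be sketched as a small standalone C program. This is a minimal
illustration of the technique only, not the vhost API: struct used_elem,
flush_to_guest() and BATCH are hypothetical stand-ins for vq->heads,
vhost_add_used_and_signal_n() and VHOST_VSOCK_BATCH.

/*
 * Minimal userspace sketch of the accumulate-and-flush pattern used in the
 * patch above.  All names here are stand-ins, not part of the vhost API.
 */
#include <stdio.h>

#define BATCH 16

struct used_elem {
	unsigned int id;
	unsigned int len;
};

/* Stand-in for vhost_add_used_and_signal_n(): publish a batch of used
 * buffers and notify the guest once for the whole batch. */
static void flush_to_guest(const struct used_elem *heads, int n)
{
	(void)heads;	/* a real implementation would write these to the used ring */
	printf("flush %d used buffers, signal guest once\n", n);
}

int main(void)
{
	struct used_elem heads[64];
	int batch_count = 0;

	/* Pretend we complete 100 packets, each consuming one rx buffer. */
	for (int pkt = 0; pkt < 100; pkt++) {
		int headcount = 1;

		heads[batch_count].id = (unsigned int)pkt;
		heads[batch_count].len = 0;
		batch_count += headcount;

		/* Flush once enough completions have accumulated. */
		if (batch_count > BATCH) {
			flush_to_guest(heads, batch_count);
			batch_count = 0;
		}
	}

	/* Final flush so a partially filled batch is never left unsignalled. */
	if (batch_count)
		flush_to_guest(heads, batch_count);

	return 0;
}

The point of the pattern is to amortise the cost of the guest notification:
the used ring is updated and the guest is kicked once per batch instead of
once per packet, and the trailing flush mirrors the patch's final
vhost_add_used_and_signal_n() call before leaving the send loop.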