Wait until we have enough space in the virt queue to actually queue up
our request.  Avoids the guest spinning in case we have a non-zero
number of free entries but not enough for the request.

Cc: stable@xxxxxxxxxxxxxxx
Reported-by: Alain Magloire <amagloire@xxxxxxxxxxxxxx>
Signed-off-by: Gerd Hoffmann <kraxel@xxxxxxxxxx>
---
 drivers/gpu/drm/virtio/virtgpu_vq.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 48e4f1df6e..020070d483 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -293,7 +293,7 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
 	if (ret == -ENOSPC) {
 		spin_unlock(&vgdev->ctrlq.qlock);
-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
 		spin_lock(&vgdev->ctrlq.qlock);
 		goto retry;
 	} else {
@@ -368,7 +368,7 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
 	if (ret == -ENOSPC) {
 		spin_unlock(&vgdev->cursorq.qlock);
-		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
 		spin_lock(&vgdev->cursorq.qlock);
 		goto retry;
 	} else {
-- 
2.9.3
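
For readers unfamiliar with the wake/wait pattern the patch tightens: below is a
minimal user-space sketch of the same idea, using a pthread condition variable
in place of wait_event()/ack_queue.  The names (ring_free, ring_lock, ring_cond,
queue_request, reclaim_entries) are hypothetical and not from the driver; the
point is that waking on "any free slot" can leave the producer looping when the
request needs several slots, whereas waiting for free >= needed does not.

/*
 * Illustrative sketch only, not the kernel code: wait until the ring has
 * enough free slots for the whole request, not merely a non-zero number
 * of free slots.  All identifiers here are made up for the example.
 */
#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 8

static unsigned int ring_free = RING_SIZE;
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ring_cond = PTHREAD_COND_INITIALIZER;

/* Producer: queue a request that needs 'needed' slots. */
static void queue_request(unsigned int needed)
{
	pthread_mutex_lock(&ring_lock);
	/* Analogue of wait_event(..., vq->num_free >= outcnt + incnt). */
	while (ring_free < needed)
		pthread_cond_wait(&ring_cond, &ring_lock);
	ring_free -= needed;
	printf("queued %u slots, %u free\n", needed, ring_free);
	pthread_mutex_unlock(&ring_lock);
}

/* Completion side (e.g. an ack handler): return slots and wake waiters. */
static void reclaim_entries(unsigned int done)
{
	pthread_mutex_lock(&ring_lock);
	ring_free += done;
	pthread_cond_broadcast(&ring_cond);
	pthread_mutex_unlock(&ring_lock);
}

int main(void)
{
	queue_request(3);	/* fits: 8 slots free */
	reclaim_entries(3);
	queue_request(8);	/* would block until all 8 slots are free */
	return 0;
}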