If the virtio device supports indirect ring descriptors we need only one
ring entry for the whole command.  Take that into account when checking
whether the virtqueue has enough free entries for our command.

Signed-off-by: Gerd Hoffmann <kraxel@xxxxxxxxxx>
---
 drivers/gpu/drm/virtio/virtgpu_vq.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 41e475fbd67b..a2ec09dba530 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -328,7 +328,8 @@ static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
 	bool notify = false;
-	int ret;
+	bool indirect;
+	int vqcnt, ret;
 
 again:
 	spin_lock(&vgdev->ctrlq.qlock);
@@ -341,9 +342,11 @@ static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
 		return;
 	}
 
-	if (vq->num_free < elemcnt) {
+	indirect = virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC);
+	vqcnt = indirect ? 1 : elemcnt;
+	if (vq->num_free < vqcnt) {
 		spin_unlock(&vgdev->ctrlq.qlock);
-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
+		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= vqcnt);
 		goto again;
 	}
 
-- 
2.18.1
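
For illustration only, a minimal sketch of the accounting rule the patch applies;
the helper below and its name are made up and not part of the driver.  When
VIRTIO_RING_F_INDIRECT_DESC is negotiated, the whole scatterlist is packed into
an indirect descriptor table referenced by a single ring descriptor, so the
command occupies one ring entry; without the feature each scatterlist element
needs its own entry.

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

/*
 * Illustrative helper (not in the driver): how many ring entries a command
 * with 'elemcnt' scatterlist elements will consume.
 */
static int needed_ring_entries(struct virtio_device *vdev, int elemcnt)
{
	/*
	 * With indirect descriptors the sg chain lives in a separately
	 * allocated indirect table, referenced by one ring descriptor.
	 * Otherwise each element takes a ring descriptor of its own.
	 */
	if (virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
		return 1;
	return elemcnt;
}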