Introduce a helper to check whether we will use the indirect
feature. It will be used by the packed ring too.

Signed-off-by: Tiwei Bie <tiwei.bie@xxxxxxxxx>
---
 drivers/virtio/virtio_ring.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 10d407910aa2..d1076f28c7e9 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -145,6 +145,18 @@ struct vring_virtqueue {
 
 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
 
+static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
+					  unsigned int total_sg)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	/*
+	 * If the host supports indirect descriptor tables, and we have multiple
+	 * buffers, then go indirect. FIXME: tune this threshold
+	 */
+	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
+}
+
 /*
  * Modern virtio devices have feature bits to specify whether they need a
  * quirk and bypass the IOMMU. If not there, just use the DMA API.
@@ -324,9 +336,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 
 	head = vq->free_head;
 
-	/* If the host supports indirect descriptor tables, and we have multiple
-	 * buffers, then go indirect. FIXME: tune this threshold */
-	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
+	if (virtqueue_use_indirect(_vq, total_sg))
 		desc = alloc_indirect_split(_vq, total_sg, gfp);
 	else {
 		desc = NULL;
-- 
2.14.5
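
For readers skimming the series: the helper only centralizes the existing
threshold check (indirect descriptors negotiated, more than one buffer, and
at least one free descriptor), so the split and packed ring paths can share
the same policy. Below is a minimal, self-contained userspace sketch of that
same decision logic; the demo_vq struct and demo_* names are made up here
purely for illustration and are not part of the kernel patch.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct vring_virtqueue (illustrative only). */
struct demo_vq {
	bool indirect;		/* host offered VIRTIO_RING_F_INDIRECT_DESC */
	unsigned int num_free;	/* free descriptors left in the ring */
};

/* Same condition as virtqueue_use_indirect() in the patch above. */
static bool demo_use_indirect(const struct demo_vq *vq, unsigned int total_sg)
{
	return vq->indirect && total_sg > 1 && vq->num_free;
}

int main(void)
{
	struct demo_vq vq = { .indirect = true, .num_free = 16 };

	printf("1 sg  -> %d\n", demo_use_indirect(&vq, 1)); /* 0: single buffer, use the ring directly */
	printf("4 sgs -> %d\n", demo_use_indirect(&vq, 4)); /* 1: multiple buffers, go indirect */

	vq.num_free = 0;
	printf("full  -> %d\n", demo_use_indirect(&vq, 4)); /* 0: no free descriptors left */

	return 0;
}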