[RFC 2/5] virtio_ring: introduce virtqueue_map/unmap_sgs()

From: Jason Wang <jasowang@xxxxxxxxxx>

Introduce new virtqueue DMA operations for drivers that want to make
use of the premapping API but operate at the sg level.

Note that we still follow the assumptions of virtqueue_add(), so
dma_map_sg() is not used. This could be optimized in the future.

Signed-off-by: Jason Wang <jasowang@xxxxxxxxxx>
Signed-off-by: Eugenio Pérez <eperezma@xxxxxxxxxx>
---
Eugenio's changes: remove a blank line.

TODO: Should we call dma_map directly instead of this? XDP does the
direct call.
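
For illustration only (not part of this patch): the "direct call" the TODO
refers to could look roughly like the sketch below, where the driver maps
each sg entry itself through virtqueue_dma_map_single_attrs() (the helper
the virtio-net XDP path uses) instead of calling virtqueue_map_sgs().
demo_map_sg_directly() is a made-up name, and the loop and error handling
are mine, not taken from any existing driver.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Sketch only: map one sg list entry by entry via the virtqueue DMA API. */
static int demo_map_sg_directly(struct virtqueue *vq, struct scatterlist *sgl,
				enum dma_data_direction dir)
{
	struct scatterlist *sg;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		dma_addr_t addr;

		addr = virtqueue_dma_map_single_attrs(vq, sg_virt(sg),
						      sg->length, dir, 0);
		if (virtqueue_dma_mapping_error(vq, addr))
			goto err_unmap;

		sg_dma_address(sg) = addr;
	}

	return 0;

err_unmap:
	/* Unwind only the entries that were mapped before the failure. */
	for (; sgl != sg; sgl = sg_next(sgl))
		virtqueue_dma_unmap_single_attrs(vq, sg_dma_address(sgl),
						 sgl->length, dir, 0);
	return -ENOMEM;
}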
---
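Also for illustration (not part of this patch): a minimal sketch of how a
driver could combine the new helpers with a premapped add variant.
demo_submit() and add_premapped_bufs() are made-up names standing in for
driver code and for whichever *_premapped() add helper the driver uses;
only virtqueue_map_sgs()/virtqueue_unmap_sgs() come from this series.

#include <linux/scatterlist.h>
#include <linux/virtio.h>

static int demo_submit(struct virtqueue *vq, void *out, size_t out_len,
		       void *in, size_t in_len, void *data)
{
	struct scatterlist out_sg, in_sg;
	struct scatterlist *sgs[2] = { &out_sg, &in_sg };
	int err;

	sg_init_one(&out_sg, out, out_len);
	sg_init_one(&in_sg, in, in_len);

	/* Map every entry up front; on failure nothing is left mapped. */
	err = virtqueue_map_sgs(vq, sgs, 1, 1);
	if (err)
		return err;

	/* sg_dma_address() of each entry is now valid for the device. */
	err = add_premapped_bufs(vq, sgs, 1, 1, data); /* hypothetical */
	if (err)
		virtqueue_unmap_sgs(vq, sgs, 1, 1);

	return err;
}
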
 drivers/virtio/virtio_ring.c | 128 +++++++++++++++++++++++++++++++----
 include/linux/virtio.h       |  10 +++
 2 files changed, 125 insertions(+), 13 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index fdd2d2b07b5a..05729bc5cbb1 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -359,6 +359,26 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
 	return vq->dma_dev;
 }
 
+static int __vring_map_one_sg(const struct vring_virtqueue *vq,
+			      struct scatterlist *sg,
+			      enum dma_data_direction direction,
+			      dma_addr_t *addr)
+{
+	/*
+	 * We can't use dma_map_sg, because we don't use scatterlists in
+	 * the way it expects (we don't guarantee that the scatterlist
+	 * will exist for the lifetime of the mapping).
+	 */
+	*addr = dma_map_page(vring_dma_dev(vq),
+			    sg_page(sg), sg->offset, sg->length,
+			    direction);
+
+	if (dma_mapping_error(vring_dma_dev(vq), *addr))
+		return -ENOMEM;
+
+	return 0;
+}
+
 /* Map one sg entry. */
 static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
 			    enum dma_data_direction direction, dma_addr_t *addr,
@@ -383,19 +403,7 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
 		return 0;
 	}
 
-	/*
-	 * We can't use dma_map_sg, because we don't use scatterlists in
-	 * the way it expects (we don't guarantee that the scatterlist
-	 * will exist for the lifetime of the mapping).
-	 */
-	*addr = dma_map_page(vring_dma_dev(vq),
-			    sg_page(sg), sg->offset, sg->length,
-			    direction);
-
-	if (dma_mapping_error(vring_dma_dev(vq), *addr))
-		return -ENOMEM;
-
-	return 0;
+	return __vring_map_one_sg(vq, sg, direction, addr);
 }
 
 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
@@ -526,6 +534,100 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
 	return next;
 }
 
+void virtqueue_unmap_sgs(struct virtqueue *_vq,
+			 struct scatterlist *sgs[],
+			 unsigned int out_sgs,
+			 unsigned int in_sgs)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	struct scatterlist *sg;
+	int n;
+
+	for (n = 0; n < out_sgs; n++) {
+		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+			dma_unmap_page(vring_dma_dev(vq),
+				       sg_dma_address(sg),
+				       sg->length,
+				       DMA_TO_DEVICE);
+		}
+	}
+
+	for (; n < (out_sgs + in_sgs); n++) {
+		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+			dma_unmap_page(vring_dma_dev(vq),
+				       sg_dma_address(sg),
+				       sg->length,
+				       DMA_FROM_DEVICE);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(virtqueue_unmap_sgs);
+
+int virtqueue_map_sgs(struct virtqueue *_vq,
+		      struct scatterlist *sgs[],
+		      unsigned int out_sgs,
+		      unsigned int in_sgs)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	int i, n, mapped_sg = 0;
+	struct scatterlist *sg;
+
+	for (n = 0; n < out_sgs; n++) {
+		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+			dma_addr_t addr;
+
+			if (__vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
+				goto unmap_release;
+
+			sg_dma_address(sg) = addr;
+			mapped_sg++;
+		}
+	}
+
+	for (; n < (out_sgs + in_sgs); n++) {
+		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+			dma_addr_t addr;
+
+			if (__vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))
+				goto unmap_release;
+
+			sg_dma_address(sg) = addr;
+			mapped_sg++;
+		}
+	}
+
+	return 0;
+
+unmap_release:
+	i = 0;
+
+	for (n = 0; n < out_sgs; n++) {
+		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+			if (i++ == mapped_sg)
+				goto out;
+			dma_unmap_page(vring_dma_dev(vq),
+				       sg_dma_address(sg),
+				       sg->length,
+				       DMA_TO_DEVICE);
+		}
+	}
+
+	for (; n < (out_sgs + in_sgs); n++) {
+		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+
+			if (i++ == mapped_sg)
+				goto out;
+			dma_unmap_page(vring_dma_dev(vq),
+				       sg_dma_address(sg),
+				       sg->length,
+				       DMA_FROM_DEVICE);
+		}
+	}
+out:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_sgs);
+
 static inline int virtqueue_add_split(struct virtqueue *_vq,
 				      struct scatterlist *sgs[],
 				      unsigned int total_sg,
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index dd88682e27e3..28db998d691e 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -67,6 +67,16 @@ int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
 				   void *data,
 				   gfp_t gfp);
 
+int virtqueue_map_sgs(struct virtqueue *_vq,
+		      struct scatterlist *sgs[],
+		      unsigned int out_sgs,
+		      unsigned int in_sgs);
+
+void virtqueue_unmap_sgs(struct virtqueue *_vq,
+			 struct scatterlist *sgs[],
+			 unsigned int out_sgs,
+			 unsigned int in_sgs);
+
 int virtqueue_add_sgs(struct virtqueue *vq,
 		      struct scatterlist *sgs[],
 		      unsigned int out_sgs,
-- 
2.48.1




