Re: [RFC PATCH v2 08/11] iommu/dma: Support PCI P2PDMA pages in dma-iommu map_sg

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On 2021-03-11 23:31, Logan Gunthorpe wrote:
When a PCI P2PDMA page is seen, set the IOVA length of the segment
to zero so that it is not mapped into the IOVA. Then, in finalise_sg(),
apply the appropriate bus address to the segment. The IOVA is not
created if the scatterlist only consists of P2PDMA pages.

This misled me at first, but I see the implementation does actually appear to accommodate the case of working ACS where P2P *would* still need to be mapped at the IOMMU.

Similar to dma-direct, the sg_mark_pci_p2pdma() flag is used to
indicate bus address segments. On unmap, P2PDMA segments are skipped
over when determining the start and end IOVA addresses.

With this change, the flags variable in the dma_map_ops is
set to DMA_F_PCI_P2PDMA_SUPPORTED to indicate support for
P2PDMA pages.

Signed-off-by: Logan Gunthorpe <logang@xxxxxxxxxxxx>
---
  drivers/iommu/dma-iommu.c | 63 ++++++++++++++++++++++++++++++++-------
  1 file changed, 53 insertions(+), 10 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index af765c813cc8..c0821e9051a9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -20,6 +20,7 @@
  #include <linux/mm.h>
  #include <linux/mutex.h>
  #include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
  #include <linux/swiotlb.h>
  #include <linux/scatterlist.h>
  #include <linux/vmalloc.h>
@@ -846,7 +847,7 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
   * segment's start address to avoid concatenating across one.
   */
  static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
-		dma_addr_t dma_addr)
+		dma_addr_t dma_addr, unsigned long attrs)
  {
  	struct scatterlist *s, *cur = sg;
  	unsigned long seg_mask = dma_get_seg_boundary(dev);
@@ -864,6 +865,20 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
  		sg_dma_address(s) = DMA_MAPPING_ERROR;
  		sg_dma_len(s) = 0;
+ if (is_pci_p2pdma_page(sg_page(s)) && !s_iova_len) {
+			if (i > 0)
+				cur = sg_next(cur);
+
+			sg_dma_address(cur) = sg_phys(s) + s->offset -

Are you sure about that? ;)

+				pci_p2pdma_bus_offset(sg_page(s));

Can the bus offset make P2P addresses overlap with regions of mem space that we might use for regular IOVA allocation? That would be very bad...

+			sg_dma_len(cur) = s->length;
+			sg_mark_pci_p2pdma(cur);
+
+			count++;
+			cur_len = 0;
+			continue;
+		}
+
  		/*
  		 * Now fill in the real DMA data. If...
  		 * - there is a valid output segment to append to
@@ -960,11 +975,12 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
  	struct iommu_dma_cookie *cookie = domain->iova_cookie;
  	struct iova_domain *iovad = &cookie->iovad;
  	struct scatterlist *s, *prev = NULL;
+	struct dev_pagemap *pgmap = NULL;
  	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
  	dma_addr_t iova;
  	size_t iova_len = 0;
  	unsigned long mask = dma_get_seg_boundary(dev);
-	int i;
+	int i, map = -1, ret = 0;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
  	    iommu_deferred_attach(dev, domain))
@@ -993,6 +1009,23 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
  		s_length = iova_align(iovad, s_length + s_iova_off);
  		s->length = s_length;
+ if (is_pci_p2pdma_page(sg_page(s))) {
+			if (sg_page(s)->pgmap != pgmap) {
+				pgmap = sg_page(s)->pgmap;
+				map = pci_p2pdma_dma_map_type(dev, pgmap);
+			}
+
+			if (map < 0) {

It rather feels like it should be the job of whoever creates the list in the first place not to put unusable pages in it, especially since the p2pdma_map_type looks to be a fairly coarse-grained and static thing. The DMA API isn't responsible for validating normal memory pages, so what makes P2P special?

+				ret = -EREMOTEIO;
+				goto out_restore_sg;
+			}
+
+			if (map) {
+				s->length = 0;

I'm not really thrilled about the idea of passing zero-length segments to iommu_map_sg(). Yes, it happens to trick the concatenation logic in the current implementation into doing what you want, but it feels fragile.

+				continue;
+			}
+		}
+
  		/*
  		 * Due to the alignment of our single IOVA allocation, we can
  		 * depend on these assumptions about the segment boundary mask:
@@ -1015,6 +1048,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
  		prev = s;
  	}
+ if (!iova_len)
+		return __finalise_sg(dev, sg, nents, 0, attrs);
+
  	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
  	if (!iova)
  		goto out_restore_sg;
@@ -1026,19 +1062,19 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
  	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
  		goto out_free_iova;
- return __finalise_sg(dev, sg, nents, iova);
+	return __finalise_sg(dev, sg, nents, iova, attrs);
out_free_iova:
  	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
  out_restore_sg:
  	__invalidate_sg(sg, nents);
-	return 0;
+	return ret;
  }
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
  		int nents, enum dma_data_direction dir, unsigned long attrs)
  {
-	dma_addr_t start, end;
+	dma_addr_t end, start = DMA_MAPPING_ERROR;
  	struct scatterlist *tmp;
  	int i;
@@ -1054,14 +1090,20 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
  	 * The scatterlist segments are mapped into a single
  	 * contiguous IOVA allocation, so this is incredibly easy.
  	 */
-	start = sg_dma_address(sg);
-	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+	for_each_sg(sg, tmp, nents, i) {
+		if (sg_is_pci_p2pdma(tmp))

Since the flag is associated with the DMA address which will no longer be valid, shouldn't it be cleared? The circumstances in which leaving it around could cause a problem are tenuous, but definitely possible.

Robin.

+			continue;
  		if (sg_dma_len(tmp) == 0)
  			break;
-		sg = tmp;
+
+		if (start == DMA_MAPPING_ERROR)
+			start = sg_dma_address(tmp);
+
+		end = sg_dma_address(tmp) + sg_dma_len(tmp);
  	}
-	end = sg_dma_address(sg) + sg_dma_len(sg);
-	__iommu_dma_unmap(dev, start, end - start);
+
+	if (start != DMA_MAPPING_ERROR)
+		__iommu_dma_unmap(dev, start, end - start);
  }
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -1254,6 +1296,7 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
  }
static const struct dma_map_ops iommu_dma_ops = {
+	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
  	.alloc			= iommu_dma_alloc,
  	.free			= iommu_dma_free,
  	.alloc_pages		= dma_common_alloc_pages,




[Index of Archives]     [Linux RAID]     [Linux SCSI]     [Linux ATA RAID]     [IDE]     [Linux Wireless]     [Linux Kernel]     [ATH6KL]     [Linux Bluetooth]     [Linux Netdev]     [Kernel Newbies]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Device Mapper]

  Powered by Linux