Re: [PATCH v5 14/16] dma-direct: Allocate memory from restricted DMA pool if available

On 2021-04-22 09:15, Claire Chang wrote:
The restricted DMA pool is preferred if available.

The restricted DMA pools provide a basic level of protection against the
DMA overwriting buffer contents at unexpected times. However, to protect
against general data leakage and system memory corruption, the system
needs to provide a way to lock down memory access, e.g., an MPU.

Signed-off-by: Claire Chang <tientzu@xxxxxxxxxxxx>
---
  kernel/dma/direct.c | 35 ++++++++++++++++++++++++++---------
  1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 7a27f0510fcc..29523d2a9845 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -78,6 +78,10 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
  static void __dma_direct_free_pages(struct device *dev, struct page *page,
  				    size_t size)
  {
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+	if (swiotlb_free(dev, page, size))
+		return;
+#endif
  	dma_free_contiguous(dev, page, size);
  }
@@ -92,7 +96,17 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
  	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
  					   &phys_limit);
-	page = dma_alloc_contiguous(dev, size, gfp);
+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+	page = swiotlb_alloc(dev, size);
+	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+		__dma_direct_free_pages(dev, page, size);
+		page = NULL;
+	}
+#endif
+
+	if (!page)
+		page = dma_alloc_contiguous(dev, size, gfp);
  	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
  		dma_free_contiguous(dev, page, size);
  		page = NULL;
@@ -148,7 +162,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
  		gfp |= __GFP_NOWARN;

  	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_dev_swiotlb_force(dev)) {
  		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
  		if (!page)
  			return NULL;
@@ -161,8 +175,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
  	}

  	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev))
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_dev_swiotlb_force(dev))
  		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);

  	/*
@@ -172,7 +186,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
  	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
  	    !gfpflags_allow_blocking(gfp) &&
  	    (force_dma_unencrypted(dev) ||
-	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
+	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	      !dev_is_dma_coherent(dev))) &&
+	    !is_dev_swiotlb_force(dev))
  		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

  	/* we always manually zero the memory once we are done */
@@ -253,15 +269,15 @@ void dma_direct_free(struct device *dev, size_t size,
  	unsigned int page_order = get_order(size);

  	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_dev_swiotlb_force(dev)) {
  		/* cpu_addr is a struct page cookie, not a kernel address */
  		dma_free_contiguous(dev, cpu_addr, size);
  		return;
  	}

  	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev)) {
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_dev_swiotlb_force(dev)) {
  		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
  		return;
  	}
@@ -289,7 +305,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
  	void *ret;

  	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
+	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+	    !is_dev_swiotlb_force(dev))
  		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

Wait, this seems broken for non-coherent devices - in that case we need to return a non-cacheable address, but we can't simply fall through into the remapping path below in GFP_ATOMIC context. That's why we need the atomic pool concept in the first place :/
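To make the hazard concrete, here is a condensed sketch of the resulting
flow (my own illustration, not the actual dma_direct_alloc(); it assumes
it sits in kernel/dma/direct.c, where the static helpers are visible):

	void *sketch_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
	{
		struct page *page;

		/*
		 * Atomic pool: memory remapped non-cacheable at boot, so it
		 * can be handed out without sleeping. The added
		 * !is_dev_swiotlb_force(dev) term makes restricted-pool
		 * devices skip this branch...
		 */
		if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
		    !dev_is_dma_coherent(dev) &&
		    !gfpflags_allow_blocking(gfp) &&
		    !is_dev_swiotlb_force(dev))
			return dma_direct_alloc_from_pool(dev, size,
							  dma_handle, gfp);

		/* ...so they fall through to a fresh allocation... */
		page = __dma_direct_alloc_pages(dev, size, gfp);
		if (!page)
			return NULL;

		/*
		 * ...but creating a new non-cacheable mapping goes through
		 * vmap(), which allocates page tables and may sleep - not
		 * allowed when the caller passed GFP_ATOMIC.
		 */
		return dma_common_contiguous_remap(page, size,
				pgprot_dmacoherent(PAGE_KERNEL),
				__builtin_return_address(0));
	}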

Unless I've overlooked something, we're still using the regular cacheable linear map address of the dma_io_tlb_mem buffer, no?
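That is, for a restricted-pool page the address handed back would
effectively be (sketch of the net effect only):

	struct page *page = swiotlb_alloc(dev, size);
	/* linear-map alias: cacheable, wrong for a non-coherent device */
	void *vaddr = page_address(page);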

Robin.

  	page = __dma_direct_alloc_pages(dev, size, gfp);



