Re: [PATCHv7 5/9] ARM: dma-mapping: implement dma sg methods on top of any generic dma ops

Hi Marek,

As per our discussion over IRC, I would like to check with you whether it is feasible to extend the dma operation calls to cover coherent regions. You mentioned that, since struct page will not be available for buffers in these regions, functions like arm_dma_map_sg() (below) will fail during cache maintenance. In fact, I am hitting exactly this issue when I integrate the dma-buf buffer sharing series, the v4l2/vb2 exporter patch series, and dma-mapping (v7) and use coherent memory.
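For reference, a minimal sketch of the failure mode as I understand it (my own illustration, not part of Marek's patch; the helper name is hypothetical):

/*
 * Illustration only: buffers carved out of a dma_declare_coherent_memory()
 * region typically live in device memory outside the kernel memmap, so
 * there is no valid struct page behind them.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>

static bool sg_entry_backed_by_struct_page(struct scatterlist *s)
{
	struct page *page = sg_page(s);

	/*
	 * True for normal kernel memory: ops->map_page() can safely do
	 * cache maintenance on this page.  For coherent-region buffers
	 * the pfn falls outside the memmap, so arm_dma_map_sg() ends up
	 * operating on an invalid struct page and fails.
	 */
	return page && pfn_valid(page_to_pfn(page));
}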

So I would like to hear your opinion, and that of others, on extending the dma-mapping framework for scenarios that use device coherent memory.

Regards,
Subash

On 02/29/2012 08:34 PM, Marek Szyprowski wrote:
This patch converts all dma_sg methods to be generic (independent of the
current DMA mapping implementation for ARM architecture). All dma sg
operations are now implemented on top of respective
dma_map_page/dma_sync_single_for* operations from dma_map_ops structure.

Signed-off-by: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Signed-off-by: Kyungmin Park <kyungmin.park@xxxxxxxxxxx>
---
  arch/arm/mm/dma-mapping.c |   43 +++++++++++++++++++------------------------
  1 files changed, 19 insertions(+), 24 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index cd5ed8d..a5a0b5b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -616,7 +616,7 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
  EXPORT_SYMBOL(___dma_page_dev_to_cpu);

  /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
   * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
   * @sg: list of buffers
   * @nents: number of buffers to map
@@ -634,12 +634,13 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
  int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
  		enum dma_data_direction dir, struct dma_attrs *attrs)
  {
+	struct dma_map_ops *ops = get_dma_ops(dev);
  	struct scatterlist *s;
  	int i, j;

  	for_each_sg(sg, s, nents, i) {
-		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
-						s->length, dir);
+		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+						s->length, dir, attrs);
  		if (dma_mapping_error(dev, s->dma_address))
  			goto bad_mapping;
  	}
@@ -647,12 +648,12 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,

   bad_mapping:
  	for_each_sg(sg, s, i, j)
-		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
  	return 0;
  }

  /**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
   * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
   * @sg: list of buffers
   * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -664,15 +665,17 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
  void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
  		enum dma_data_direction dir, struct dma_attrs *attrs)
  {
+	struct dma_map_ops *ops = get_dma_ops(dev);
  	struct scatterlist *s;
+
  	int i;

  	for_each_sg(sg, s, nents, i)
-		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
  }

  /**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
   * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
   * @sg: list of buffers
   * @nents: number of buffers to map (returned from dma_map_sg)
@@ -681,21 +684,17 @@ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
  void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
  			int nents, enum dma_data_direction dir)
  {
+	struct dma_map_ops *ops = get_dma_ops(dev);
  	struct scatterlist *s;
  	int i;

-	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s),
-					    sg_dma_len(s), dir))
-			continue;
-
-		__dma_page_dev_to_cpu(sg_page(s), s->offset,
-				      s->length, dir);
-	}
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+					 dir);
  }

  /**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
   * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
   * @sg: list of buffers
   * @nents: number of buffers to map (returned from dma_map_sg)
@@ -704,17 +703,13 @@ void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
  void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  			int nents, enum dma_data_direction dir)
  {
+	struct dma_map_ops *ops = get_dma_ops(dev);
  	struct scatterlist *s;
  	int i;

-	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_device(dev, sg_dma_address(s),
-					sg_dma_len(s), dir))
-			continue;
-
-		__dma_page_cpu_to_dev(sg_page(s), s->offset,
-				      s->length, dir);
-	}
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+					    dir);
  }

  /*
--