From: Guangming Cao <Guangming.Cao@xxxxxxxxxxxx>

For dma-heap users, there is currently no way to bypass CPU cache sync
when mapping or unmapping an iova with a dma-heap, although they can do
so for other allocations by passing DMA_ATTR_SKIP_CPU_SYNC to
dma_alloc_attrs.

To keep the behaviour aligned, also honour
dma_buf_attachment.dma_map_attrs on the dma-heap side when doing the
iova map and unmap.

Signed-off-by: Guangming Cao <Guangming.Cao@xxxxxxxxxxxx>
---
 drivers/dma-buf/heaps/cma_heap.c    | 6 ++++--
 drivers/dma-buf/heaps/system_heap.c | 6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 0c05b79870f9..2c9feb3bfc3e 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -99,9 +99,10 @@ static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachme
 {
 	struct dma_heap_attachment *a = attachment->priv;
 	struct sg_table *table = &a->table;
+	int attrs = attachment->dma_map_attrs;
 	int ret;
 
-	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
 	if (ret)
 		return ERR_PTR(-ENOMEM);
 	a->mapped = true;
@@ -113,9 +114,10 @@ static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
 				   enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
+	int attrs = attachment->dma_map_attrs;
 
 	a->mapped = false;
-	dma_unmap_sgtable(attachment->dev, table, direction, 0);
+	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
 }
 
 static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 23a7e74ef966..fc7b1e02988e 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -130,9 +130,10 @@ static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attac
 {
 	struct dma_heap_attachment *a = attachment->priv;
 	struct sg_table *table = a->table;
+	int attrs = attachment->dma_map_attrs;
 	int ret;
 
-	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -145,9 +146,10 @@ static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
 				      enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
+	int attrs = attachment->dma_map_attrs;
 
 	a->mapped = false;
-	dma_unmap_sgtable(attachment->dev, table, direction, 0);
+	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
 }
 
 static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-- 
2.17.1
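
For context, below is a minimal importer-side sketch of how a driver could
request the skip-sync behaviour these hunks honour. It is illustrative only,
not part of the patch: it assumes struct dma_buf_attachment already carries
the dma_map_attrs field that the hunks above read, and the function and
variable names here are made up for the example.

/*
 * Illustrative importer-side sketch only, not part of this patch.
 * Assumes struct dma_buf_attachment provides the dma_map_attrs field
 * read by cma_heap/system_heap map/unmap above.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

static struct sg_table *example_map_skip_sync(struct dma_buf *dmabuf,
					      struct device *dev,
					      struct dma_buf_attachment **out)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Ask the heap to skip CPU cache maintenance at map/unmap time. */
	attach->dma_map_attrs = DMA_ATTR_SKIP_CPU_SYNC;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return sgt;
	}

	*out = attach;
	return sgt;
}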