dma-bufs are device coherent, with explicit CPU synchronisation provided via
the begin/end cpu access ioctls. As the coherency of the dma-buf is
explicitly defined to be under user control, flushing any caches on
attach/detach of the dma-buf is additional work that doesn't aid the user in
the slightest.

Suggested-by: Lucas Stach <l.stach@xxxxxxxxxxxxxx>
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_dmabuf.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 864439a214c8..68c8bfd8eb7a 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -67,7 +67,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 		src = sg_next(src);
 	}
 
-	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+	if (!dma_map_sg_attrs(attachment->dev,
+			      st->sgl, st->nents, dir,
+			      DMA_ATTR_SKIP_CPU_SYNC)) {
 		ret = -ENOMEM;
 		goto err_free_sg;
 	}
@@ -90,7 +92,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
 
-	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	dma_unmap_sg_attrs(attachment->dev,
+			   sg->sgl, sg->nents, dir,
+			   DMA_ATTR_SKIP_CPU_SYNC);
 
 	sg_free_table(sg);
 	kfree(sg);
-- 
2.15.1
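
For reference, the explicit CPU synchronisation the commit message relies on
is driven from userspace through DMA_BUF_IOCTL_SYNC (the begin/end cpu access
ioctls from include/uapi/linux/dma-buf.h). A minimal sketch of that pattern,
assuming a dmabuf_fd and a CPU mapping already obtained from the exporter
(the helper name cpu_fill_dmabuf is purely illustrative):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/dma-buf.h>

	/*
	 * Bracket a CPU write to the mapped dma-buf with the begin/end
	 * cpu access ioctls; any cache maintenance happens at these
	 * explicit sync points, not on attach/detach of the dma-buf.
	 * dmabuf_fd and map are assumed to come from the exporter and
	 * a prior mmap() of the dma-buf (illustrative only).
	 */
	static int cpu_fill_dmabuf(int dmabuf_fd, void *map, size_t len)
	{
		struct dma_buf_sync sync = {
			.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE,
		};

		/* begin cpu access */
		if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync))
			return -1;

		/* CPU writes while access is bracketed */
		memset(map, 0, len);

		/* end cpu access */
		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
		if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync))
			return -1;

		return 0;
	}

With cache maintenance tied to these explicit sync points, there is nothing
left for the map/unmap paths to flush on attach/detach, which is what passing
DMA_ATTR_SKIP_CPU_SYNC to dma_map_sg_attrs()/dma_unmap_sg_attrs() tells the
DMA API.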