From: Rob Clark <robdclark@xxxxxxxxxxxx>

For a long time drm/msm had been abusing dma_map_* or dma_sync_* to
clean pages for buffers with uncached/writecombine CPU mmap'ings.  But
drm/msm is managing its own iommu domains, and really doesn't want the
additional functionality provided by various DMA API ops.

Let's just cut the abstraction and use drm_cache where possible.

Signed-off-by: Rob Clark <robdclark@xxxxxxxxxxxx>
---
 drivers/gpu/drm/msm/msm_gem.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8cf6362e64bf..af19ef20d0d5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -9,6 +9,8 @@
 #include <linux/dma-buf.h>
 #include <linux/pfn_t.h>
 
+#include <drm/drm_cache.h>
+
 #include "msm_drv.h"
 #include "msm_fence.h"
 #include "msm_gem.h"
@@ -48,6 +50,7 @@ static bool use_pages(struct drm_gem_object *obj)
 
 static void sync_for_device(struct msm_gem_object *msm_obj)
 {
+#if !defined(HAS_DRM_CACHE)
 	struct device *dev = msm_obj->base.dev->dev;
 
 	if (get_dma_ops(dev)) {
@@ -57,10 +60,14 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
 		dma_map_sg(dev, msm_obj->sgt->sgl,
 			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 	}
+#else
+	drm_clflush_sg(msm_obj->sgt);
+#endif
 }
 
 static void sync_for_cpu(struct msm_gem_object *msm_obj)
 {
+#if !defined(HAS_DRM_CACHE)
 	struct device *dev = msm_obj->base.dev->dev;
 
 	if (get_dma_ops(dev)) {
@@ -70,6 +77,7 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
 		dma_unmap_sg(dev, msm_obj->sgt->sgl,
 			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 	}
+#endif
 }
 
 /* allocate pages from VRAM carveout, used when no IOMMU: */
-- 
2.21.0
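
For context, a minimal sketch contrasting the two paths the commit
message describes. This is illustration only, not part of the patch;
the wrapper function names are hypothetical, but dma_map_sg() and
drm_clflush_sg() are the real kernel APIs involved:

	#include <drm/drm_cache.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/*
	 * DMA API path (what the driver did before): the cache clean
	 * for the GPU is only a side effect of creating a DMA mapping
	 * that msm, which programs its own iommu domains, never uses.
	 */
	static void clean_for_device_dma_api(struct device *dev,
					     struct sg_table *sgt)
	{
		dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
	}

	/*
	 * drm_cache path (what the patch switches to where available):
	 * just the cache maintenance itself, with no mapping state to
	 * tear down later via dma_unmap_sg().
	 */
	static void clean_for_device_drm_cache(struct sg_table *sgt)
	{
		drm_clflush_sg(sgt);
	}

Note that drm_clflush_sg() only has a real implementation on some
architectures (drm_cache.c is a warn-and-do-nothing stub elsewhere),
which is presumably what the HAS_DRM_CACHE guard in the patch is
accounting for.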