We've found the DMA API is effective for flushing the cache on ARM
devices, and it requires a struct device *.

Signed-off-by: Gurchetan Singh <gurchetansingh@xxxxxxxxxxxx>
---
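Not part of this patch, but for context: a rough sketch of how an ARM
path in drm_flush_pages() could consume the new dev argument via the
streaming DMA API (needs <linux/dma-mapping.h>). The helper name, the
NULL-dev check, and the map/unmap pair are illustrative assumptions,
not code from this series -- mapping a page for device access with
DMA_TO_DEVICE is what performs the cache clean on ARM, and the mapping
itself can be dropped immediately afterwards:

static void drm_flush_pages_arm_sketch(struct device *dev,
				       struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long i;

	/* Callers may pass a NULL device, e.g. the TTM hunk below. */
	if (WARN_ON_ONCE(!dev))
		return;

	for (i = 0; i < num_pages; i++) {
		dma_addr_t dma_handle;

		/* Mapping for device access cleans the CPU cache... */
		dma_handle = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_handle))
			continue;
		/* ...and the mapping itself isn't needed afterwards. */
		dma_unmap_page(dev, dma_handle, PAGE_SIZE, DMA_TO_DEVICE);
	}
}
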
 drivers/gpu/drm/drm_cache.c             | 5 +++--
 drivers/gpu/drm/i915/i915_gem.c         | 2 +-
 drivers/gpu/drm/i915/i915_gem_clflush.c | 2 +-
 drivers/gpu/drm/ttm/ttm_tt.c            | 2 +-
 drivers/gpu/drm/vgem/vgem_drv.c         | 2 +-
 include/drm/drm_cache.h                 | 5 +++--
 6 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 89cdd32fe1f3..3d2bb9d71a60 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -78,7 +78,8 @@ static void drm_cache_flush_clflush(struct page *pages[],
  * to a page in the array.
  */
 void
-drm_flush_pages(struct page *pages[], unsigned long num_pages)
+drm_flush_pages(struct device *dev, struct page *pages[],
+		unsigned long num_pages)
 {
 
 #if defined(CONFIG_X86)
@@ -119,7 +120,7 @@ EXPORT_SYMBOL(drm_flush_pages);
  * sg.
  */
 void
-drm_flush_sg(struct sg_table *st)
+drm_flush_sg(struct device *dev, struct sg_table *st)
 {
 #if defined(CONFIG_X86)
 	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fe191d0e84e1..045866f2b5dd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -259,7 +259,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 	if (needs_clflush &&
 	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
 	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-		drm_flush_sg(pages);
+		drm_flush_sg(obj->base.dev->dev, pages);
 
 	__start_cpu_write(obj);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index f413c5e5735d..b5938f14141f 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -71,7 +71,7 @@ static const struct dma_fence_ops i915_clflush_ops = {
 static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-	drm_flush_sg(obj->mm.pages);
+	drm_flush_sg(obj->base.dev->dev, obj->mm.pages);
 	intel_fb_obj_flush(obj, ORIGIN_CPU);
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 59e272a58752..fb2382d01bba 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -122,7 +122,7 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	}
 
 	if (ttm->caching_state == tt_cached)
-		drm_flush_pages(ttm->pages, ttm->num_pages);
+		drm_flush_pages(NULL, ttm->pages, ttm->num_pages);
 
 	for (i = 0; i < ttm->num_pages; ++i) {
 		cur_page = ttm->pages[i];
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 802a97e1a4bf..35bfdfb746a7 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -325,7 +325,7 @@ static int vgem_prime_pin(struct drm_gem_object *obj)
 	/* Flush the object from the CPU cache so that importers can rely
 	 * on coherent indirect access via the exported dma-address.
 	 */
-	drm_flush_pages(pages, n_pages);
+	drm_flush_pages(obj->dev->dev, pages, n_pages);
 
 	return 0;
 }
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 25c029470315..cb77315dd8dd 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,8 +35,9 @@
 
 #include <linux/scatterlist.h>
 
-void drm_flush_pages(struct page *pages[], unsigned long num_pages);
-void drm_flush_sg(struct sg_table *st);
+void drm_flush_pages(struct device *dev, struct page *pages[],
+		     unsigned long num_pages);
+void drm_flush_sg(struct device *dev, struct sg_table *st);
 void drm_clflush_virt_range(void *addr, unsigned long length);
 
 static inline bool drm_arch_can_wc_memory(void)
-- 
2.13.5