Add caching field to drm_gem_shmem_object to specify the caching attributes for mappings. Add helper function to tweak pgprot accordingly. Switch vmap and mmap functions to the new helper. Set caching to write-combine when creating the object so behavior doesn't change by default. Drivers can override that later if needed. Signed-off-by: Gerd Hoffmann <kraxel@xxxxxxxxxx> --- include/drm/drm_gem_shmem_helper.h | 12 ++++++++++++ drivers/gpu/drm/drm_gem_shmem_helper.c | 24 +++++++++++++++++++++--- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 6748379a0b44..9d6e02c6205f 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -17,6 +17,11 @@ struct drm_mode_create_dumb; struct drm_printer; struct sg_table; +enum drm_gem_shmem_caching { + DRM_GEM_SHMEM_CACHED = 1, + DRM_GEM_SHMEM_WC, +}; + /** * struct drm_gem_shmem_object - GEM object backed by shmem */ @@ -83,6 +88,11 @@ struct drm_gem_shmem_object { * The address are un-mapped when the count reaches zero. */ unsigned int vmap_use_count; + + /** + * @caching: caching attributes for mappings. 
+ */ + enum drm_gem_shmem_caching caching; }; #define to_drm_gem_shmem_obj(obj) \ @@ -130,6 +140,8 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj); +pgprot_t drm_gem_shmem_caching(struct drm_gem_shmem_object *shmem, pgprot_t prot); + /** * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations * diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index a421a2eed48a..5bb94e130a50 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -76,6 +76,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t mutex_init(&shmem->pages_lock); mutex_init(&shmem->vmap_lock); INIT_LIST_HEAD(&shmem->madv_list); + shmem->caching = DRM_GEM_SHMEM_WC; /* * Our buffers are kept pinned, so allocating them @@ -256,9 +257,11 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) if (obj->import_attach) shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf); - else + else { + pgprot_t prot = drm_gem_shmem_caching(shmem, PAGE_KERNEL); shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, - VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + VM_MAP, prot); + } if (!shmem->vaddr) { DRM_DEBUG_KMS("Failed to vmap pages\n"); @@ -540,7 +543,8 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) } vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_page_prot = drm_gem_shmem_caching(shmem, vma->vm_page_prot); vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); vma->vm_ops = &drm_gem_shmem_vm_ops; @@ -683,3 +687,17 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table); + +pgprot_t drm_gem_shmem_caching(struct 
drm_gem_shmem_object *shmem, pgprot_t prot) +{ + switch (shmem->caching) { + case DRM_GEM_SHMEM_CACHED: + return prot; + case DRM_GEM_SHMEM_WC: + return pgprot_writecombine(prot); + default: + WARN_ON_ONCE(1); + return prot; + } +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_caching); -- 2.18.1 _______________________________________________ dri-devel mailing list dri-devel@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/dri-devel