From: Thierry Reding <treding@xxxxxxxxxx>

Implement the ->begin_cpu_access() and ->end_cpu_access() callbacks to
allow userspace to invalidate the cache before accessing a buffer that
is exported using PRIME and flush the cache after modifying the buffer
through its userspace mapping.

Signed-off-by: Thierry Reding <treding@xxxxxxxxxx>
---
 drivers/gpu/drm/nouveau/nouveau_prime.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 5d885d7cb059..a3a586ce864c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -29,6 +29,26 @@
 #include "nouveau_drv.h"
 #include "nouveau_gem.h"
 
+static int nouveau_gem_prime_begin_cpu_access(struct dma_buf *buf,
+					      enum dma_data_direction direction)
+{
+	struct nouveau_bo *bo = nouveau_gem_object(buf->priv);
+
+	nouveau_bo_sync_for_cpu(bo);
+
+	return 0;
+}
+
+static int nouveau_gem_prime_end_cpu_access(struct dma_buf *buf,
+					    enum dma_data_direction direction)
+{
+	struct nouveau_bo *bo = nouveau_gem_object(buf->priv);
+
+	nouveau_bo_sync_for_device(bo);
+
+	return 0;
+}
+
 static void *nouveau_gem_prime_kmap_atomic(struct dma_buf *buf,
 					   unsigned long page)
 {
@@ -106,6 +126,8 @@ static const struct dma_buf_ops nouveau_gem_prime_dmabuf_ops = {
 	.map_dma_buf = drm_gem_map_dma_buf,
 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
+	.begin_cpu_access = nouveau_gem_prime_begin_cpu_access,
+	.end_cpu_access = nouveau_gem_prime_end_cpu_access,
 	.map_atomic = nouveau_gem_prime_kmap_atomic,
 	.unmap_atomic = nouveau_gem_prime_kunmap_atomic,
 	.map = nouveau_gem_prime_kmap,
-- 
2.15.1
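
For context, these callbacks are reached from userspace through the
DMA_BUF_IOCTL_SYNC ioctl on the exported dma-buf fd. The sketch below is
illustrative only and not part of the patch; the fd and buffer size are
assumed to come from a prior PRIME export (e.g.
DRM_IOCTL_PRIME_HANDLE_TO_FD), and the helper name is made up for the
example.

	#include <linux/dma-buf.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <string.h>

	/* Bracket a CPU write to a PRIME-exported buffer with sync ioctls. */
	static int fill_buffer(int dmabuf_fd, size_t size)
	{
		struct dma_buf_sync sync = { 0 };
		void *ptr;
		int ret = -1;

		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   dmabuf_fd, 0);
		if (ptr == MAP_FAILED)
			return -1;

		/* invokes ->begin_cpu_access(): invalidate CPU caches first */
		sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
		if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) == 0) {
			memset(ptr, 0, size);

			/* invokes ->end_cpu_access(): flush CPU writes back */
			sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
			if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) == 0)
				ret = 0;
		}

		munmap(ptr, size);
		return ret;
	}

Bracketing CPU access this way is what lets nouveau_bo_sync_for_cpu() and
nouveau_bo_sync_for_device() keep CPU caches coherent with the device on
non-coherent systems.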