Rendering operations to the dma-buf are tracked implicitly via the
reservation_object (dmabuf->resv). The dmabuf sync ioctl allows
userspace to wait upon outstanding rendering and prepare the object for
CPU access (provided by the prime dma_buf_ops.begin_cpu_access). Fill
this out for the generic drm_gem_prime by waiting on outstanding
rendering via dmabuf->resv.

(This offers an alternative to using poll that is consistent with other
drivers that may need to do more work to prepare the object for access
by the CPU.)

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/drm_prime.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 780589b420a4..479ff7cc3634 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -28,6 +28,7 @@
 
 #include <linux/export.h>
 #include <linux/dma-buf.h>
+#include <linux/reservation.h>
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 
@@ -288,6 +289,22 @@ static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 	return dev->driver->gem_prime_mmap(obj, vma);
 }
 
+static int drm_gem_begin_cpu_access(struct dma_buf *dma_buf,
+				    enum dma_data_direction direction)
+{
+	bool write = (direction == DMA_BIDIRECTIONAL ||
+		      direction == DMA_TO_DEVICE);
+	struct reservation_object *resv = dma_buf->resv;
+	long ret;
+
+	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+						  MAX_SCHEDULE_TIMEOUT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
 	.attach = drm_gem_map_attach,
 	.detach = drm_gem_map_detach,
@@ -301,6 +318,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,
+	.begin_cpu_access = drm_gem_begin_cpu_access,
 };
 
 /**
-- 
2.8.1
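
For reference, a minimal userspace sketch of the path this patch fills
in (names and error handling here are illustrative, not part of the
patch; the ioctl and flags come from the uapi <linux/dma-buf.h>).
Issuing DMA_BUF_SYNC_START with DMA_BUF_SYNC_READ reaches
begin_cpu_access() with DMA_FROM_DEVICE, so the new helper blocks on
the exclusive fence before the CPU reads the buffer through an mmap:

#include <sys/ioctl.h>
#include <linux/dma-buf.h>
#include <errno.h>

/* Wait for outstanding rendering before the CPU reads the buffer. */
static int dmabuf_begin_cpu_read(int dmabuf_fd)
{
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
	};
	int ret;

	/* The kernel-side wait is interruptible, so restart on
	 * EINTR/EAGAIN as the sync ioctl documentation requires. */
	do {
		ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret;
}

Once the CPU access is finished, userspace issues the matching
DMA_BUF_SYNC_END with the same READ/WRITE flags. Requesting
DMA_BUF_SYNC_WRITE (or RW) instead maps to DMA_TO_DEVICE or
DMA_BIDIRECTIONAL, for which the helper above passes write=true and so
waits on all shared fences as well as the exclusive one.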
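
And the poll() route mentioned in the parenthetical, for comparison
(again only a sketch): POLLIN waits for the exclusive fence, POLLOUT
for all fences, but unlike the sync ioctl it gives the exporter no hook
to prepare the object (e.g. cache maintenance) before CPU access:

#include <poll.h>

/* Block until the dma-buf is safe for a CPU read (POLLIN); use
 * POLLOUT instead to wait for all fences before a CPU write. */
static int dmabuf_wait_readable(int dmabuf_fd)
{
	struct pollfd pfd = {
		.fd = dmabuf_fd,
		.events = POLLIN,
	};

	return poll(&pfd, 1, -1); /* negative timeout: wait forever */
}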