If the GEM objects being used for rendering in this request have been
exported via dma-buf to a third party, hook ourselves into the dma-buf
reservation object so that the third party can serialise with our
rendering via the dma-buf fences.

Testcase: igt/prime_busy
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_dmabuf.c     | 56 ++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 33 ++++++++++++++++--
 2 files changed, 83 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 3a00ab3ad06e..bab71ba9c25a 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -23,9 +23,13 @@
  * Authors:
  *	Dave Airlie <airlied@xxxxxxxxxx>
  */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
 #include <drm/drmP.h>
+
 #include "i915_drv.h"
-#include <linux/dma-buf.h>
 
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
@@ -218,25 +222,71 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.end_cpu_access = i915_gem_end_cpu_access,
 };
 
+static void export_fences(struct drm_i915_gem_object *obj,
+			   struct dma_buf *dma_buf)
+{
+	struct reservation_object *resv = dma_buf->resv;
+	struct drm_i915_gem_request *req;
+	unsigned long active;
+	int idx;
+
+	active = __I915_BO_ACTIVE(obj);
+	if (!active)
+		return;
+
+	/* Mark the object for future fences before racily adding old fences */
+	obj->base.dma_buf = dma_buf;
+
+	mutex_lock(&resv->lock.base);
+
+	for_each_active(active, idx) {
+		rcu_read_lock();
+		req = i915_gem_active_get_rcu(&obj->last_read[idx]);
+		rcu_read_unlock();
+		if (!req)
+			continue;
+
+		if (reservation_object_reserve_shared(resv) == 0)
+			reservation_object_add_shared_fence(resv, &req->fence);
+
+		i915_gem_request_put(req);
+	}
+
+	rcu_read_lock();
+	req = i915_gem_active_get_rcu(&obj->last_write);
+	rcu_read_unlock();
+	if (req) {
+		reservation_object_add_excl_fence(resv, &req->fence);
+		i915_gem_request_put(req);
+	}
+
+	mutex_unlock(&resv->lock.base);
+}
+
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dma_buf;
 
 	exp_info.ops = &i915_dmabuf_ops;
 	exp_info.size = gem_obj->size;
 	exp_info.flags = flags;
 	exp_info.priv = gem_obj;
 
-
 	if (obj->ops->dmabuf_export) {
 		int ret = obj->ops->dmabuf_export(obj);
 		if (ret)
 			return ERR_PTR(ret);
 	}
 
-	return dma_buf_export(&exp_info);
+	dma_buf = dma_buf_export(&exp_info);
+	if (IS_ERR(dma_buf))
+		return dma_buf;
+
+	export_fences(obj, dma_buf);
+	return dma_buf;
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0d28703d991a..e2aba40bf328 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,13 +26,17 @@
  *
  */
 
+#include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/uaccess.h>
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
+
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
-#include <linux/dma_remapping.h>
-#include <linux/uaccess.h>
 
 #define __EXEC_OBJECT_HAS_PIN (1<<31)
 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
@@ -1193,7 +1197,29 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
-static void
+static void eb_export_fence(struct drm_i915_gem_object *obj,
+			    struct drm_i915_gem_request *req,
+			    unsigned int flags)
+{
+	struct reservation_object *resv;
+
+	resv = i915_gem_object_get_dmabuf_resv(obj);
+	if (!resv)
+		return;
+
+	/* Ignore errors from failing to allocate the new fence, we can't
+	 * handle an error right now. Worst case should be missed
+	 * synchronisation leading to rendering corruption.
+	 */
+	mutex_lock(&resv->lock.base);
+	if (flags & EXEC_OBJECT_WRITE)
+		reservation_object_add_excl_fence(resv, &req->fence);
+	else if (reservation_object_reserve_shared(resv) == 0)
+		reservation_object_add_shared_fence(resv, &req->fence);
+	mutex_unlock(&resv->lock.base);
+}
+
+void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
@@ -1212,6 +1238,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		obj->base.read_domains = obj->base.pending_read_domains;
 
 		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+		eb_export_fence(obj, req, vma->exec_entry->flags);
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
-- 
2.8.1
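
For context, the importer side of this contract would look roughly like the
following. This is a minimal sketch, not part of the patch: the helper name
importer_wait_rendering and the 100ms timeout are invented, but
reservation_object_wait_timeout_rcu() is the contemporary API an importing
driver would use to block on the fences exported above.

	#include <linux/dma-buf.h>
	#include <linux/jiffies.h>
	#include <linux/reservation.h>

	/* Hypothetical importer-side helper: block until the rendering
	 * that i915 has published on the shared reservation object has
	 * completed. Returns > 0 on success, 0 on timeout, < 0 on error.
	 */
	static long importer_wait_rendering(struct dma_buf *dma_buf, bool write)
	{
		/* A write must wait for all fences (readers and writer);
		 * a read only needs the exclusive (write) fence to signal.
		 */
		return reservation_object_wait_timeout_rcu(dma_buf->resv,
							   write, true,
							   msecs_to_jiffies(100));
	}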
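
The referenced testcase exercises this from userspace by polling the exported
dma-buf fd, which the dma-buf core implements on top of the same reservation
object. A sketch of that idea (hypothetical helper, zero-timeout poll):

	#include <poll.h>
	#include <stdbool.h>

	/* Sketch of the igt/prime_busy idea: POLLIN on a dma-buf fd becomes
	 * ready once the exclusive (write) fence signals, POLLOUT once all
	 * fences signal. A zero-timeout poll reporting no events therefore
	 * means the GPU is still using the buffer.
	 */
	static bool prime_busy(int dmabuf_fd, bool write)
	{
		struct pollfd pfd = {
			.fd = dmabuf_fd,
			.events = write ? POLLOUT : POLLIN,
		};

		return poll(&pfd, 1, 0) == 0;
	}

Without the fences exported by this patch, such a poll would report the
buffer idle even while i915 was still rendering into it.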