On 25/08/2016 10:08, Chris Wilson wrote:

We are about to specialize object synchronisation to enable nonblocking
execbuf submission. First we make a copy of the current object
synchronisation for execbuffer. The general i915_gem_object_sync() will
be removed following the removal of CS flips in the near future.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 64 +++++++++++++++++++++++++++++-
 1 file changed, 63 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 601156c353cc..b7cc158733ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1122,6 +1122,68 @@ static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
 }
 
 static int
+__eb_sync(struct drm_i915_gem_request *to,
+          struct drm_i915_gem_request *from)
+{
+        int idx, ret;
+
+        if (to->engine == from->engine)
+                return 0;
+
+        idx = intel_engine_sync_index(from->engine, to->engine);
+        if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+                return 0;
+
+        trace_i915_gem_ring_sync_to(to, from);
+        if (!i915.semaphores) {
+                ret = i915_wait_request(from, true, NULL, NO_WAITBOOST);
+                if (ret)
+                        return ret;
+        } else {
+                ret = to->engine->semaphore.sync_to(to, from);
+                if (ret)
+                        return ret;
+        }
+
+        from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+        return 0;
+}
+
+static int
+eb_sync(struct drm_i915_gem_object *obj,
+        struct drm_i915_gem_request *to,
+        bool write)
+{
+        struct i915_gem_active *active;
+        unsigned long active_mask;
+        int idx;
+
+        if (write) {
+                active_mask = i915_gem_object_get_active(obj);
+                active = obj->last_read;
+        } else {
+                active_mask = 1;
+                active = &obj->last_write;
+        }
+
+        for_each_active(active_mask, idx) {
+                struct drm_i915_gem_request *request;
+                int ret;
+
+                request = i915_gem_active_peek(&active[idx],
+                                               &obj->base.dev->struct_mutex);
+                if (!request)
+                        continue;
+
+                ret = __eb_sync(to, request);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+
+static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                                 struct list_head *vmas)
 {
@@ -1133,7 +1195,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                 struct drm_i915_gem_object *obj = vma->obj;
 
                 if (obj->flags & other_rings) {
-                        ret = i915_gem_object_sync(obj, req);
+                        ret = eb_sync(obj, req, obj->base.pending_write_domain);
                         if (ret)
                                 return ret;
                 }

Reviewed-by: John Harrison <john.c.harrison@xxxxxxxxx>
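
For readers outside the i915 code base, here is a minimal userspace sketch of the ordering rule that eb_sync() in the patch implements: a request that will write to an object waits for every outstanding reader (which also covers the last writer, since writes are tracked as reads per engine), while a read-only request waits only for the last writer. This is not kernel code; struct engine_request, struct gem_object_model, sync_to(), model_eb_sync() and MAX_ENGINES are hypothetical stand-ins used purely for illustration.

/*
 * Simplified, standalone model of the eb_sync() read/write selection.
 * Only the decision of *which* prior requests a new request must wait
 * for mirrors the patch above; everything else is made up for the demo.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_ENGINES 4

struct engine_request {
        int engine;             /* engine the request executes on */
        unsigned int seqno;     /* submission order, 0 = no request */
};

struct gem_object_model {
        struct engine_request last_read[MAX_ENGINES];   /* last reader per engine */
        struct engine_request last_write;               /* most recent writer */
        unsigned long active_read_mask;                 /* bit set per engine with a reader */
};

/* Emit a wait unless @from is idle or already runs on @to's engine. */
static void sync_to(const struct engine_request *to,
                    const struct engine_request *from)
{
        if (!from->seqno || from->engine == to->engine)
                return;
        printf("engine %d waits for seqno %u (engine %d)\n",
               to->engine, from->seqno, from->engine);
}

/* Mirror of the patch's rule: writes wait on all readers, reads wait on the writer. */
static void model_eb_sync(const struct gem_object_model *obj,
                          const struct engine_request *to, bool write)
{
        int idx;

        if (write) {
                for (idx = 0; idx < MAX_ENGINES; idx++)
                        if (obj->active_read_mask & (1ul << idx))
                                sync_to(to, &obj->last_read[idx]);
        } else {
                sync_to(to, &obj->last_write);
        }
}

int main(void)
{
        struct gem_object_model obj = {
                .last_read = { { 0, 0 }, { 1, 10 }, { 2, 11 }, { 0, 0 } },
                .last_write = { 1, 10 },
                .active_read_mask = 0x6,        /* engines 1 and 2 hold readers */
        };
        struct engine_request req = { .engine = 0, .seqno = 12 };

        model_eb_sync(&obj, &req, true);        /* write: waits on both readers */
        model_eb_sync(&obj, &req, false);       /* read: waits only on the writer */
        return 0;
}

The same split shows up in the execbuffer call site, where obj->base.pending_write_domain supplies the write flag per object.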