This patch is broken out of the next just to remove the code motion
from that patch and make it more readable. What we do here is move
i915_vma_move_to_active() to i915_gem_execbuffer.c and put the three
stages (read, write, fenced) together so that future modifications to
active handling are all located in the same spot. The importance of
this is that we can more simply control the order in which the
requests are placed in the retirement list (i.e. control the order in
which we retire and so control the lifetimes to avoid having to hold
onto references).

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h              |  3 +-
 drivers/gpu/drm/i915/i915_gem.c              | 18 -------
 drivers/gpu/drm/i915/i915_gem_context.c      |  9 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   | 72 ++++++++++++++++++----------
 drivers/gpu/drm/i915/i915_gem_render_state.c |  2 +-
 5 files changed, 55 insertions(+), 49 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fa42cb14ed9b..42f2ec097e7c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3133,7 +3133,8 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_request *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
-                             struct drm_i915_gem_request *req);
+                             struct drm_i915_gem_request *req,
+                             unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                          struct drm_device *dev,
                          struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ba6cc3642862..08623d8d22dd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2330,24 +2330,6 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 	return obj->mapping;
 }
 
-void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct drm_i915_gem_request *req)
-{
-	struct drm_i915_gem_object *obj = vma->obj;
-	struct intel_engine_cs *engine;
-
-	engine = i915_gem_request_get_engine(req);
-
-	/* Add a reference if we're newly entering the active list. */
-	if (obj->active == 0)
-		i915_gem_object_get(obj);
-	obj->active |= intel_engine_flag(engine);
-
-	i915_gem_active_set(&obj->last_read[engine->id], req);
-
-	list_move_tail(&vma->vm_link, &vma->vm->active_list);
-}
-
 static void
 i915_gem_object_retire__fence(struct i915_gem_active *active,
			      struct drm_i915_gem_request *req)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c213fafe41d9..ce3de01f4a6e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -815,8 +815,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
+		struct drm_i915_gem_object *obj = from->engine[RCS].state;
+
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -824,10 +824,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->engine[RCS].state->dirty = 1;
+		obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->engine[RCS].state);
+		i915_gem_object_ggtt_unpin(obj);
 		i915_gem_context_put(from);
 	}
 	engine->last_context = i915_gem_context_get(to);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6e43d96a87ca..9b6befa17655 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1114,8 +1114,48 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 	return ctx;
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct drm_i915_gem_request *req,
+			     unsigned int flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	const unsigned int idx = req->engine->id;
+
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+	obj->dirty = 1; /* be paranoid */
+
+	/* Add a reference if we're newly entering the active list. */
+	if (obj->active == 0)
+		i915_gem_object_get(obj);
+	obj->active |= 1 << idx;
+	i915_gem_active_set(&obj->last_read[idx], req);
+
+	if (flags & EXEC_OBJECT_WRITE) {
+		i915_gem_active_set(&obj->last_write, req);
+
+		intel_fb_obj_invalidate(obj, ORIGIN_CS);
+
+		/* update for the implicit flush after a batch */
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+		i915_gem_active_set(&obj->last_fence, req);
+		if (flags & __EXEC_OBJECT_HAS_FENCE) {
+			struct drm_i915_private *dev_priv = req->i915;
+
+			list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+				       &dev_priv->mm.fence_list);
+		}
+	}
+
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
+}
+
 static void eb_export_fence(struct drm_i915_gem_object *obj,
-			    struct drm_i915_gem_request *req)
+			    struct drm_i915_gem_request *req,
+			    unsigned int flags)
 {
 	struct reservation_object *resv;
 
@@ -1128,7 +1168,7 @@ static void eb_export_fence(struct drm_i915_gem_object *obj,
 	 * synchronisation leading to rendering corruption.
 	 */
 	mutex_lock(&resv->lock.base);
-	if (obj->base.write_domain)
+	if (flags & EXEC_OBJECT_WRITE)
 		reservation_object_add_excl_fence(resv, &req->fence);
 	else if (reservation_object_reserve_shared(resv) == 0)
 		reservation_object_add_shared_fence(resv, &req->fence);
@@ -1139,40 +1179,22 @@ static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 		struct drm_i915_gem_object *obj = vma->obj;
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
 
-		obj->dirty = 1; /* be paranoid */
 		obj->base.write_domain = obj->base.pending_write_domain;
-		if (obj->base.write_domain == 0)
+		if (obj->base.write_domain)
+			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
+		else
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 
-		i915_vma_move_to_active(vma, req);
-		eb_export_fence(obj, req);
-		if (obj->base.write_domain) {
-			i915_gem_active_set(&obj->last_write, req);
-
-			intel_fb_obj_invalidate(obj, ORIGIN_CS);
-
-			/* update for the implicit flush after a batch */
-			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-		}
-		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			i915_gem_active_set(&obj->last_fence, req);
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = engine->i915;
-				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
-					       &dev_priv->mm.fence_list);
-			}
-		}
-
+		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+		eb_export_fence(obj, req, vma->exec_entry->flags);
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index dd84793d75d9..2165cd604691 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -260,7 +260,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 		goto out;
 	}
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req, 0);
 out:
 	render_state_fini(&so);
 	return ret;
-- 
2.8.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx