[PATCH 159/190] drm/i915: Defer active reference until required

We only need the active reference to keep the object alive after the
handle has been deleted (so as to prevent a synchronous gem_close). Why
then pay the price of a kref on every execbuf when we can insert that
final active ref just in time for the handle deletion?

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
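A minimal sketch of the lifetime rule this patch implements (illustrative
only; the toy_* names below are invented for this note and are not i915
API): the close path takes the one extra reference only if the GPU is
still using the object, and retirement drops it again, so idle objects
never pay for the kref at all.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_object {
    	unsigned int refcount;
    	bool active;		/* GPU still reading/writing the object */
    	bool active_ref;	/* have we taken the deferred reference? */
    };

    static void toy_get(struct toy_object *obj) { obj->refcount++; }

    static void toy_put(struct toy_object *obj)
    {
    	if (--obj->refcount == 0) {
    		printf("object freed\n");
    		free(obj);
    	}
    }

    /* Handle deletion: only now, and only if the GPU is still using the
     * object, do we pay for the extra reference that keeps it alive
     * (cf. i915_gem_close_object() in the patch). */
    static void toy_close(struct toy_object *obj)
    {
    	if (obj->active && !obj->active_ref) {
    		obj->active_ref = true;
    		toy_get(obj);
    	}
    	toy_put(obj);	/* drop the handle's reference */
    }

    /* Retirement: the GPU is done; drop the deferred reference iff we
     * took one (cf. i915_gem_object_retire__read()). */
    static void toy_retire(struct toy_object *obj)
    {
    	obj->active = false;
    	if (obj->active_ref) {
    		obj->active_ref = false;
    		toy_put(obj);
    	}
    }

    int main(void)
    {
    	struct toy_object *obj = calloc(1, sizeof(*obj));

    	obj->refcount = 1;	/* the userspace handle */
    	obj->active = true;	/* pretend a batch is still using it */

    	toy_close(obj);		/* handle gone, object survives */
    	toy_retire(obj);	/* GPU done, object freed here */
    	return 0;
    }
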
 drivers/gpu/drm/i915/i915_drv.h              | 26 +++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem.c              | 31 +++++++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_gem_batch_pool.c   |  2 +-
 drivers/gpu/drm/i915/i915_gem_context.c      |  2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  5 +----
 drivers/gpu/drm/i915/i915_gem_render_state.c |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c      |  2 +-
 7 files changed, 57 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 917686eed962..addd33bbc847 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2048,6 +2048,13 @@ struct drm_i915_gem_object {
 #define __I915_BO_ACTIVE(bo) (READ_ONCE((bo)->flags) & (I915_BO_ACTIVE_MASK << I915_BO_ACTIVE_SHIFT))
 
 	/**
+	 * Have we taken a reference for the object for incomplete GPU
+	 * activity?
+	 */
+#define I915_BO_ACTIVE_REF_SHIFT (I915_BO_ACTIVE_SHIFT + I915_NUM_RINGS)
+#define I915_BO_ACTIVE_REF_BIT (1 << I915_BO_ACTIVE_REF_SHIFT)
+
+	/**
 	 * This is set if the object has been written to since last bound
 	 * to the GTT
 	 */
@@ -2163,6 +2170,25 @@ i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
 	return obj->flags & (1 << (engine + I915_BO_ACTIVE_SHIFT));
 }
 
+static inline bool
+i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
+{
+	return obj->flags & I915_BO_ACTIVE_REF_BIT;
+}
+
+static inline void
+i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
+{
+	obj->flags |= I915_BO_ACTIVE_REF_BIT;
+}
+
+static inline void
+i915_gem_object_unset_active_reference(struct drm_i915_gem_object *obj)
+{
+	obj->flags &= ~I915_BO_ACTIVE_REF_BIT;
+}
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1fa4752682d6..962fd81ce26c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2116,7 +2116,10 @@ i915_gem_object_retire__read(struct i915_gem_active *active,
 		list_move_tail(&obj->global_list,
 			       &request->i915->mm.bound_list);
 
-	drm_gem_object_unreference(&obj->base);
+	if (i915_gem_object_has_active_reference(obj)) {
+		i915_gem_object_unset_active_reference(obj);
+		drm_gem_object_unreference(&obj->base);
+	}
 }
 
 static void i915_gem_mark_idle(struct drm_i915_private *dev_priv)
@@ -2390,13 +2393,12 @@ out:
 * write domains, emitting any outstanding lazy request and retiring any
  * completed requests.
  */
-static void
-i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
+static bool i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 {
 	int i;
 
 	if (!i915_gem_object_is_active(obj))
-		return;
+		return false;
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct drm_i915_gem_request *req;
@@ -2408,6 +2410,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 		if (i915_gem_request_completed(req))
 			i915_gem_request_retire_upto(req);
 	}
+
+	return i915_gem_object_is_active(obj);
 }
 
 void i915_vma_close(struct i915_vma *vma)
@@ -2431,7 +2435,12 @@ void i915_gem_close_object(struct drm_gem_object *gem,
 	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
 		if (vma->vm->file == fpriv)
 			i915_vma_close(vma);
-	i915_gem_object_flush_active(obj);
+
+	if (i915_gem_object_flush_active(obj) &&
+	    !i915_gem_object_has_active_reference(obj)) {
+		i915_gem_object_set_active_reference(obj);
+		drm_gem_object_reference(&obj->base);
+	}
 	mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
@@ -3847,6 +3856,18 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	intel_runtime_pm_put(dev_priv);
 }
 
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
+{
+	if (obj == NULL)
+		return;
+
+	GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
+	if (i915_gem_object_is_active(obj))
+		i915_gem_object_set_active_reference(obj);
+	else
+		drm_gem_object_unreference(&obj->base);
+}
+
 static void
 i915_gem_stop_ringbuffers(struct drm_device *dev)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 5ec5b1439e1f..d46012234db1 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -75,7 +75,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 						 batch_pool_link);
 
 			list_del(&obj->batch_pool_link);
-			drm_gem_object_unreference(&obj->base);
+			__i915_gem_object_release_unless_active(obj);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f261d9e0929d..e619cdadaeb6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -151,7 +151,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 		if (ce->ring)
 			intel_ring_free(ce->ring);
 
-		drm_gem_object_unreference(&ce->state->base);
+		__i915_gem_object_release_unless_active(ce->state);
 	}
 
 	i915_ppgtt_put(ctx->ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b7424f1b1293..00f3529a3560 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1202,15 +1202,12 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
 	obj->dirty = 1; /* be paranoid  */
 
-	/* Add a reference if we're newly entering the active list.
-	 * The order in which we add operations to the retirement queue is
+	/* The order in which we add operations to the retirement queue is
 	 * vital here: mark_active adds to the start of the callback list,
 	 * such that subsequent callbacks are called first. Therefore we
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	if (!i915_gem_object_is_active(obj))
-		drm_gem_object_reference(&obj->base);
 	i915_gem_object_set_active(obj, engine);
 	i915_gem_request_mark_active(req, &obj->last_read[engine]);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 89b5c99bbb02..2fac95b0ba44 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -173,7 +173,7 @@ err_out:
 static void render_state_fini(struct render_state *so)
 {
 	i915_vma_unpin(so->vma);
-	drm_gem_object_unreference(&so->obj->base);
+	__i915_gem_object_release_unless_active(so->obj);
 }
 
 static int render_state_prepare(struct intel_engine_cs *ring,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index be2207f551e3..41c52cdcbe4a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1961,7 +1961,7 @@ void intel_ring_unmap(struct intel_ring *ring)
 
 static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf)
 {
-	drm_gem_object_unreference(&ringbuf->obj->base);
+	__i915_gem_object_release_unless_active(ringbuf->obj);
 	ringbuf->obj = NULL;
 }
 
-- 
2.7.0.rc3
