[PATCH] drm/i915: Enable debugobjects for request validation

Use debugobjects to track and validate the lifecycle of a struct
drm_i915_gem_request, so that illegal state transitions and
use-after-free are reported at the point of misuse instead of
surfacing later as random corruption.

The annotations encode the request lifecycle below (each transition
corresponds to a debug_request_set_state() call in the patch):

	INACTIVE  -> WAITING    __i915_add_request()
	WAITING   -> READY      submit_notify()
	READY     -> EXECUTING  __i915_gem_request_submit()
	EXECUTING -> READY      __i915_gem_request_unsubmit()
	EXECUTING -> RETIRED    i915_gem_request_retire()
	RETIRED   -> INACTIVE   i915_fence_release()

Under CONFIG_KASAN, requests are also switched from the
SLAB_TYPESAFE_BY_RCU request cache to plain kmalloc()/dma_fence_free(),
as the immediate reuse of a typesafe-by-rcu slot would otherwise hide
the very use-after-free errors we want KASAN to report.
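
As an aside for reviewers (not part of this patch): debugobjects can
also attach a hint callback to the descriptor, so that an ODEBUG
warning identifies the object more usefully than by a raw pointer.
A hypothetical follow-up, sketched here with an invented
i915_request_debug_hint(), might look like:

	/* Sketch only, not in this patch. debugobjects prints the
	 * hint with %pS, so returning the fence ops pointer lets a
	 * splat resolve to i915_fence_ops and identify the object
	 * as a request.
	 */
	static void *i915_request_debug_hint(void *addr)
	{
		struct drm_i915_gem_request *rq = addr;

		return (void *)rq->fence.ops;
	}

	static struct debug_obj_descr i915_request_debug_descr = {
		.name = "i915_request",
		.debug_hint = i915_request_debug_hint,
	};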

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/Kconfig.debug      |  14 +++
 drivers/gpu/drm/i915/i915_gem_request.c | 158 ++++++++++++++++++++++++++++++--
 2 files changed, 165 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 108d21f34777..326be039e39c 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -27,6 +27,7 @@ config DRM_I915_DEBUG
         select DRM_DEBUG_MM if DRM=y
 	select DRM_DEBUG_MM_SELFTEST
 	select SW_SYNC # signaling validation framework (igt/syncobj*)
+	select DRM_I915_REQUEST_DEBUG_OBJECTS
 	select DRM_I915_SW_FENCE_DEBUG_OBJECTS
 	select DRM_I915_SELFTEST
         default n
@@ -64,6 +65,19 @@ config DRM_I915_TRACE_GEM
 
 	  If in doubt, say "N".
 
+config DRM_I915_REQUEST_DEBUG_OBJECTS
+        bool "Enable additional driver debugging for request objects"
+        depends on DRM_I915
+        select DEBUG_OBJECTS
+        default n
+        help
+          Choose this option to turn on extra driver debugging that may affect
+          performance but will catch some internal issues.
+
+          Recommended for driver developers only.
+
+          If in doubt, say "N".
+
 config DRM_I915_SW_FENCE_DEBUG_OBJECTS
         bool "Enable additional driver debugging for fence objects"
         depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 06ec27a68f5c..001a02d2b603 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -30,14 +30,128 @@
 
 #include "i915_drv.h"
 
+enum {
+	DEBUG_REQUEST_INACTIVE = 0,	/* allocated, not yet submitted */
+	DEBUG_REQUEST_WAITING,		/* queued, waiting for dependencies */
+	DEBUG_REQUEST_READY,		/* dependencies signaled, ready to execute */
+	DEBUG_REQUEST_EXECUTING,	/* on the engine (hw) timeline */
+	DEBUG_REQUEST_RETIRED,		/* completed and retired, awaiting free */
+};
+
+#if IS_ENABLED(CONFIG_KASAN)
+static inline struct drm_i915_gem_request *
+__request_alloc(struct drm_i915_private *i915, gfp_t gfp)
+{
+	return kmalloc(sizeof(struct drm_i915_gem_request), gfp);
+}
+
+static inline void
+__request_free(struct drm_i915_gem_request *rq)
+{
+	dma_fence_free(&rq->fence);
+}
+#else
+static inline struct drm_i915_gem_request *
+__request_alloc(struct drm_i915_private *i915, gfp_t gfp)
+{
+	return kmem_cache_alloc(i915->requests, gfp);
+}
+
+static inline void
+__request_free(struct drm_i915_gem_request *rq)
+{
+	kmem_cache_free(rq->i915->requests, rq);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_I915_REQUEST_DEBUG_OBJECTS)
+
+static struct debug_obj_descr i915_request_debug_descr = {
+	.name = "i915_request",
+};
+
+static inline void debug_request_init(struct drm_i915_gem_request *rq)
+{
+	debug_object_init(rq, &i915_request_debug_descr);
+}
+
+static inline void debug_request_activate(struct drm_i915_gem_request *rq)
+{
+	debug_object_activate(rq, &i915_request_debug_descr);
+}
+
+static inline void debug_request_set_state(struct drm_i915_gem_request *rq,
+					   int old, int new)
+{
+	debug_object_active_state(rq, &i915_request_debug_descr, old, new);
+}
+
+static inline void debug_request_deactivate(struct drm_i915_gem_request *rq)
+{
+	debug_object_deactivate(rq, &i915_request_debug_descr);
+}
+
+static inline void debug_request_destroy(struct drm_i915_gem_request *rq)
+{
+	debug_object_destroy(rq, &i915_request_debug_descr);
+}
+
+static inline void debug_request_free(struct drm_i915_gem_request *rq)
+{
+	debug_object_free(rq, &i915_request_debug_descr);
+	smp_wmb(); /* flush the change in state before reallocation */
+}
+
+static inline void debug_request_assert(struct drm_i915_gem_request *rq)
+{
+	debug_object_assert_init(rq, &i915_request_debug_descr);
+}
+
+#else
+
+static inline void debug_request_init(struct drm_i915_gem_request *rq)
+{
+}
+
+static inline void debug_request_activate(struct drm_i915_gem_request *rq)
+{
+}
+
+static inline void debug_request_set_state(struct drm_i915_gem_request *rq,
+					   int old, int new)
+{
+}
+
+static inline void debug_request_deactivate(struct drm_i915_gem_request *rq)
+{
+}
+
+static inline void debug_request_destroy(struct drm_i915_gem_request *rq)
+{
+}
+
+static inline void debug_request_free(struct drm_i915_gem_request *rq)
+{
+}
+
+static inline void debug_request_assert(struct drm_i915_gem_request *rq)
+{
+}
+
+#endif
+
 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
+	debug_request_assert(to_request(fence));
 	return "i915";
 }
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
-	/* The timeline struct (as part of the ppgtt underneath a context)
+	debug_request_assert(to_request(fence));
+
+	/*
+	 * The timeline struct (as part of the ppgtt underneath a context)
 	 * may be freed when the request is no longer in use by the GPU.
 	 * We could extend the life of a context to beyond that of all
 	 * fences, possibly keeping the hw resource around indefinitely,
@@ -53,11 +167,14 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 
 static bool i915_fence_signaled(struct dma_fence *fence)
 {
+	debug_request_assert(to_request(fence));
 	return i915_gem_request_completed(to_request(fence));
 }
 
 static bool i915_fence_enable_signaling(struct dma_fence *fence)
 {
+	debug_request_assert(to_request(fence));
+
 	if (i915_fence_signaled(fence))
 		return false;
 
@@ -69,6 +186,8 @@ static signed long i915_fence_wait(struct dma_fence *fence,
 				   bool interruptible,
 				   signed long timeout)
 {
+	debug_request_assert(to_request(fence));
+
 	return i915_wait_request(to_request(fence), interruptible, timeout);
 }
 
@@ -76,7 +195,12 @@ static void i915_fence_release(struct dma_fence *fence)
 {
 	struct drm_i915_gem_request *req = to_request(fence);
 
-	/* The request is put onto a RCU freelist (i.e. the address
+	debug_request_set_state(req,
+				DEBUG_REQUEST_RETIRED, DEBUG_REQUEST_INACTIVE);
+	debug_request_deactivate(req);
+
+	/*
+	 * The request is put onto a RCU freelist (i.e. the address
 	 * is immediately reused), mark the fences as being freed now.
 	 * Otherwise the debugobjects for the fences are only marked as
 	 * freed when the slab cache itself is freed, and so we would get
@@ -84,7 +208,9 @@ static void i915_fence_release(struct dma_fence *fence)
 	 */
 	i915_sw_fence_fini(&req->submit);
 
-	kmem_cache_free(req->i915->requests, req);
+	debug_request_free(req);
+
+	__request_free(req);
 }
 
 const struct dma_fence_ops i915_fence_ops = {
@@ -387,6 +513,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	GEM_BUG_ON(!request->i915->gt.active_requests);
 
 	trace_i915_gem_request_retire(request);
+	debug_request_set_state(request,
+				DEBUG_REQUEST_EXECUTING, DEBUG_REQUEST_RETIRED);
 
 	spin_lock_irq(&engine->timeline->lock);
 	list_del_init(&request->link);
@@ -484,6 +612,8 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
 	lockdep_assert_held(&engine->timeline->lock);
 
 	trace_i915_gem_request_execute(request);
+	debug_request_set_state(request,
+				DEBUG_REQUEST_READY, DEBUG_REQUEST_EXECUTING);
 
 	/* Transfer from per-context onto the global per-engine timeline */
 	timeline = engine->timeline;
@@ -532,6 +662,9 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline->lock);
 
+	debug_request_set_state(request,
+				DEBUG_REQUEST_EXECUTING, DEBUG_REQUEST_READY);
+
 	/* Only unwind in reverse order, required so that the per-context list
 	 * is kept in seqno/ring order.
 	 */
@@ -584,6 +717,9 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	switch (state) {
 	case FENCE_COMPLETE:
 		trace_i915_gem_request_submit(request);
+		debug_request_set_state(request,
+					DEBUG_REQUEST_WAITING,
+					DEBUG_REQUEST_READY);
 		/*
 		 * We need to serialize use of the submit_request() callback with its
 		 * hotplugging performed during an emergency i915_gem_set_wedged().
@@ -688,8 +824,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 *
 	 * Do not use kmem_cache_zalloc() here!
 	 */
-	req = kmem_cache_alloc(dev_priv->requests,
-			       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	req = __request_alloc(dev_priv,
+			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!req)) {
 		/* Ratelimit ourselves to prevent oom from malicious clients */
 		ret = i915_gem_wait_for_idle(dev_priv,
@@ -709,13 +845,15 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 		kmem_cache_shrink(dev_priv->requests);
 		rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
 
-		req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
+		req = __request_alloc(dev_priv, GFP_KERNEL);
 		if (!req) {
 			ret = -ENOMEM;
 			goto err_unreserve;
 		}
 	}
 
+	debug_request_init(req);
+
 	req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
 	GEM_BUG_ON(req->timeline == engine->timeline);
 
@@ -784,7 +922,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
 	GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
 
-	kmem_cache_free(dev_priv->requests, req);
+	__request_free(req);
 err_unreserve:
 	unreserve_engine(engine);
 err_unpin:
@@ -1063,6 +1201,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 		engine->schedule(request, request->ctx->priority);
 
 	local_bh_disable();
+	debug_request_activate(request);
+	debug_request_set_state(request,
+				DEBUG_REQUEST_INACTIVE,
+				DEBUG_REQUEST_WAITING);
 	i915_sw_fence_commit(&request->submit);
 	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
 }
@@ -1204,6 +1346,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 		   !!(flags & I915_WAIT_LOCKED));
 #endif
 	GEM_BUG_ON(timeout < 0);
+	debug_request_assert(req);
 
 	if (i915_gem_request_completed(req))
 		return timeout;
@@ -1319,6 +1462,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 		remove_wait_queue(errq, &reset);
 	remove_wait_queue(&req->execute, &exec);
 	trace_i915_gem_request_wait_end(req);
+	debug_request_assert(req);
 
 	return timeout;
 }
-- 
2.15.1
