[PATCH 18/40] drm/i915: Hook scheduler node clean up into retire requests

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



From: John Harrison <John.C.Harrison@xxxxxxxxx>

The scheduler keeps its own locks on various DRM objects in order to
guarantee safe access long after the original execbuff IOCTL has
completed. This is especially important when pre-emption is enabled, as
the batch buffer might need to be submitted to the hardware multiple
times. This patch hooks the clean-up of these locks into the request
retire function. A request can only be retired after it has completed
on the hardware and is thus no longer eligible for re-submission, so
there is no point holding on to the locks beyond that time.

v3: Updated to not WARN when cleaning a node that is being cancelled.
The clean-up will happen later anyway, so skipping it at the point of
cancellation is fine.

For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem.c       |  3 ++
 drivers/gpu/drm/i915/i915_scheduler.c | 54 ++++++++++++++++++++++++-----------
 drivers/gpu/drm/i915/i915_scheduler.h |  1 +
 3 files changed, 42 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dc5f3fe..349ff58 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1402,6 +1402,9 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 		fence_signal_locked(&request->fence);
 	}
 
+	if (request->scheduler_qe)
+		i915_gem_scheduler_clean_node(request->scheduler_qe);
+
 	i915_gem_request_unreference(request);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 300cd89..f88c871 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -406,6 +406,41 @@ void i915_scheduler_wakeup(struct drm_device *dev)
 	queue_work(dev_priv->wq, &dev_priv->mm.scheduler_work);
 }
 
+void i915_gem_scheduler_clean_node(struct i915_scheduler_queue_entry *node)
+{
+	uint32_t i;
+
+	if (!I915_SQS_IS_COMPLETE(node)) {
+		WARN(!node->params.request->cancelled,
+		     "Cleaning active node: %d!\n", node->status);
+		return;
+	}
+
+	if (node->params.batch_obj) {
+		/* The batch buffer must be unpinned before it is unreferenced
+		 * otherwise the unpin fails with a missing vma!? */
+		if (node->params.dispatch_flags & I915_DISPATCH_SECURE)
+			i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
+
+		node->params.batch_obj = NULL;
+	}
+
+	/* Release the locked buffers: */
+	for (i = 0; i < node->num_objs; i++) {
+		drm_gem_object_unreference(
+				    &node->saved_objects[i].obj->base);
+	}
+	kfree(node->saved_objects);
+	node->saved_objects = NULL;
+	node->num_objs = 0;
+
+	/* Context too: */
+	if (node->params.ctx) {
+		i915_gem_context_unreference(node->params.ctx);
+		node->params.ctx = NULL;
+	}
+}
+
 static int i915_scheduler_remove(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -415,7 +450,7 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
 	int                 flying = 0, queued = 0;
 	int                 ret = 0;
 	bool                do_submit;
-	uint32_t            i, min_seqno;
+	uint32_t            min_seqno;
 	struct list_head    remove;
 
 	if (list_empty(&scheduler->node_queue[ring->id]))
@@ -514,21 +549,8 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
 		node = list_first_entry(&remove, typeof(*node), link);
 		list_del(&node->link);
 
-		/* The batch buffer must be unpinned before it is unreferenced
-		 * otherwise the unpin fails with a missing vma!? */
-		if (node->params.dispatch_flags & I915_DISPATCH_SECURE)
-			i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
-
-		/* Release the locked buffers: */
-		for (i = 0; i < node->num_objs; i++) {
-			drm_gem_object_unreference(
-					    &node->saved_objects[i].obj->base);
-		}
-		kfree(node->saved_objects);
-
-		/* Context too: */
-		if (node->params.ctx)
-			i915_gem_context_unreference(node->params.ctx);
+		/* Free up all the DRM object references */
+		i915_gem_scheduler_clean_node(node);
 
 		/* And anything else owned by the node: */
 		node->params.request->scheduler_qe = NULL;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 56f68e5..54d87fb 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -88,6 +88,7 @@ bool        i915_scheduler_is_enabled(struct drm_device *dev);
 int         i915_scheduler_init(struct drm_device *dev);
 int         i915_scheduler_closefile(struct drm_device *dev,
 				     struct drm_file *file);
+void        i915_gem_scheduler_clean_node(struct i915_scheduler_queue_entry *node);
 int         i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
 bool        i915_scheduler_notify_request(struct drm_i915_gem_request *req);
 void        i915_scheduler_wakeup(struct drm_device *dev);
-- 
1.9.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx




[Index of Archives]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]
  Powered by Linux