[PATCH 100/190] drm/i915: Remove request retirement before each batch

This reimplements the denial-of-service protection against igt from

commit 227f782e4667fc622810bce8be8ccdeee45f89c2
Author: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Date:   Thu May 15 10:41:42 2014 +0100

    drm/i915: Retire requests before creating a new one

and transfers the stall from before each batch into the close handler.
The issue is that the stall increases latency between batches, which in
some cases (especially when coupled with execlists) is detrimental to
keeping the GPU well fed. We have also observed that retiring requests
can itself free objects (and requests), and so it makes a good first
step when shrinking.

v2: Recycle objects prior to i915_gem_object_get_pages()

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h            |  1 -
 drivers/gpu/drm/i915/i915_gem.c            | 23 +++++++++++++++--------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 --
 3 files changed, 15 insertions(+), 11 deletions(-)
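
As a reading aid for review (not part of the patch to apply): the
allocation-path half of the change condenses to roughly the sketch below,
with lines outside the hunk elided; the complementary close-handler hunk
just adds an i915_gem_object_flush_active() call before dropping
struct_mutex, so the stall now happens once per object close instead of
before every batch.

	int
	i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
	{
		const struct drm_i915_gem_object_ops *ops = obj->ops;
		int ret;

		/* early-exit and madvise sanity checks elided */
		BUG_ON(obj->pages_pin_count);

		/*
		 * Recycle as many active objects (and their requests) as
		 * possible before asking the backing store for new pages;
		 * this replaces the per-batch retire call removed from
		 * i915_gem_execbuffer_reserve().
		 */
		i915_gem_retire_requests(obj->base.dev);

		ret = ops->get_pages(obj);
		if (ret)
			return ret;

		list_add_tail(&obj->global_list,
			      &to_i915(obj->base.dev)->mm.unbound_list);

		obj->get_page.sg = obj->pages->sgl;
		obj->get_page.last = 0;

		return 0;
	}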

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index be63eaf8764a..5711ae3a22a1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2780,7 +2780,6 @@ struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring);
 
 void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 
 static inline u32 i915_reset_counter(struct i915_gpu_error *error)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a0207b9d1aea..d705005ca26e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1986,7 +1986,6 @@ err_pages:
 int
 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	const struct drm_i915_gem_object_ops *ops = obj->ops;
 	int ret;
 
@@ -2000,11 +1999,15 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 	BUG_ON(obj->pages_pin_count);
 
+	/* Recycle as many active objects as possible first */
+	i915_gem_retire_requests(obj->base.dev);
+
 	ret = ops->get_pages(obj);
 	if (ret)
 		return ret;
 
-	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+	list_add_tail(&obj->global_list,
+		      &to_i915(obj->base.dev)->mm.unbound_list);
 
 	obj->get_page.sg = obj->pages->sgl;
 	obj->get_page.last = 0;
@@ -2259,7 +2262,7 @@ void i915_gem_reset(struct drm_device *dev)
 /**
  * This function clears the request list as sequence numbers are passed.
  */
-void
+static bool
 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
 	while (!list_empty(&ring->request_list)) {
@@ -2270,10 +2273,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 					   link);
 
 		if (!i915_gem_request_completed(request))
-			break;
+			return false;
 
 		i915_gem_request_retire_upto(request);
 	}
+
+	return true;
 }
 
 void
@@ -2281,19 +2286,18 @@ i915_gem_retire_requests(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	bool idle = true;
+	bool idle;
 	int i;
 
 	if (!dev_priv->mm.busy)
 		return;
 
+	idle = true;
 	for_each_ring(ring, dev_priv, i) {
-		i915_gem_retire_requests_ring(ring);
-		idle &= list_empty(&ring->request_list);
+		idle &= i915_gem_retire_requests_ring(ring);
 		if (i915.enable_execlists)
 			idle &= intel_execlists_retire_requests(ring);
 	}
-
 	if (idle)
 		queue_delayed_work(dev_priv->wq,
 				   &dev_priv->mm.idle_work,
@@ -2399,6 +2403,7 @@ void i915_gem_close_object(struct drm_gem_object *gem,
 	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
 		if (vma->vm->file == fpriv)
 			i915_vma_close(vma);
+	i915_gem_object_flush_active(obj);
 	mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
@@ -4235,7 +4240,9 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 static void
 init_ring_lists(struct intel_engine_cs *ring)
 {
+	/* Early initialisation so that core GEM works during engine setup */
 	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->execlist_completed);
 }
 
 void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7a9d3f4732e9..90c5341506be 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -741,8 +741,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	int retry;
 
-	i915_gem_retire_requests_ring(ring);
-
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
 	INIT_LIST_HEAD(&ordered_vmas);
-- 
2.7.0.rc3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx