Re: [PATCH 19/21] drm/i915: Merge wait_for_timelines with retire_request

On 02/09/2019 05:03, Chris Wilson wrote:
wait_for_timelines is essentially the same loop as retiring requests
(with an extra), so merge the two into one routine.

Extra suspense! :)


Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
  drivers/gpu/drm/i915/gem/i915_gem_mman.c      |  4 +-
  drivers/gpu/drm/i915/gem/i915_gem_pm.c        |  6 +-
  .../drm/i915/gem/selftests/i915_gem_context.c |  4 +-
  drivers/gpu/drm/i915/gt/selftest_timeline.c   |  2 +-
  drivers/gpu/drm/i915/i915_debugfs.c           |  6 +-
  drivers/gpu/drm/i915/i915_drv.h               |  3 +-
  drivers/gpu/drm/i915/i915_gem.c               | 68 ++-----------------
  drivers/gpu/drm/i915/i915_gem_evict.c         | 12 ++--
  drivers/gpu/drm/i915/i915_gem_gtt.c           |  2 +-
  drivers/gpu/drm/i915/i915_request.c           | 21 +++++-
  drivers/gpu/drm/i915/i915_request.h           |  3 +-
  .../gpu/drm/i915/selftests/igt_flush_test.c   |  4 +-
  .../gpu/drm/i915/selftests/igt_live_test.c    |  4 +-
  .../gpu/drm/i915/selftests/mock_gem_device.c  |  2 +-
  14 files changed, 42 insertions(+), 99 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 9a8c307c5aeb..761ab0076a6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -429,9 +429,7 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
/* Attempt to reap some mmap space from dead objects */
  	do {
-		err = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_INTERRUPTIBLE,
-					     MAX_SCHEDULE_TIMEOUT);
+		err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
  		if (err)
  			break;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index e83eed8fa452..afbcf9219267 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -25,7 +25,7 @@ static void retire_work_handler(struct work_struct *work)
  	struct drm_i915_private *i915 =
  		container_of(work, typeof(*i915), gem.retire_work.work);
-	i915_retire_requests(i915);
+	i915_retire_requests(i915, 0);

The majority of callers end up passing ", 0", which is a bit unpleasing aesthetically. How about adding __i915_retire_requests(i915, timeout) for the few callers which actually need the timeout, and keeping plain i915_retire_requests(i915) for the rest? Or i915_retire_requests_wait/sync/timeout?
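
Roughly what I have in mind, purely as a sketch (the names and the inline wrapper are only placeholders, not a demand):

	/* Waiting flavour, for the idle/flush paths that pass a real timeout. */
	long __i915_retire_requests(struct drm_i915_private *i915, long timeout);

	/* Common case: only reap already-completed requests, never wait. */
	static inline void i915_retire_requests(struct drm_i915_private *i915)
	{
		__i915_retire_requests(i915, 0);
	}

That way retire_work_handler() and the other ", 0" call sites keep the tidy i915_retire_requests(i915), and only the places which genuinely wait spell out a timeout.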

queue_delayed_work(i915->wq,
  			   &i915->gem.retire_work,
@@ -59,9 +59,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
  {
  	bool result = !intel_gt_is_wedged(gt);
-	if (i915_gem_wait_for_idle(gt->i915,
-				   I915_WAIT_FOR_IDLE_BOOST,
-				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+	if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {

This now ends up as an interruptible sleep on a driver load path (and probably others), and I am not sure that is okay. How about we keep the interruptible annotation?
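
Roughly, as a sketch only (this assumes i915_gem_wait_for_idle() keeps a flags argument just for this; otherwise it is the new body from this patch):

	int i915_gem_wait_for_idle(struct drm_i915_private *i915,
				   unsigned int flags, long timeout)
	{
		/* If the device is asleep, we have no requests outstanding */
		if (!intel_gt_pm_is_awake(&i915->gt))
			return 0;

		while ((timeout = i915_retire_requests(i915, timeout)) > 0) {
			cond_resched();
			/* Only bail out on signals when the caller opted in. */
			if ((flags & I915_WAIT_INTERRUPTIBLE) &&
			    signal_pending(current))
				return -EINTR;
		}

		return timeout;
	}

(And the interruptible bool passed to dma_fence_wait_timeout() inside i915_retire_requests() would presumably want the same treatment.)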

  		/* XXX hide warning from gem_eio */
  		if (i915_modparams.reset) {
  			dev_err(gt->i915->drm.dev,
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index b87e35a713b8..bc4c8d763024 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -304,7 +304,7 @@ create_test_object(struct i915_address_space *vm,
  	int err;
/* Keep in GEM's good graces */
-	i915_retire_requests(vm->i915);
+	i915_retire_requests(vm->i915, 0);
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
  	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
@@ -923,7 +923,7 @@ __sseu_finish(const char *name,
if ((flags & TEST_IDLE) && ret == 0) {
  		ret = i915_gem_wait_for_idle(ce->engine->i915,
-					     0, MAX_SCHEDULE_TIMEOUT);
+					     MAX_SCHEDULE_TIMEOUT);
  		if (ret)
  			return ret;
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 16abfabf08c7..b0b0fa5f91de 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -734,7 +734,7 @@ static int live_hwsp_wrap(void *arg)
  			goto out;
  		}
-		i915_retire_requests(i915); /* recycle HWSP */
+		i915_retire_requests(i915, 0); /* recycle HWSP */
  	}
out:
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 09c6c485a732..d7410f3f576f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3612,12 +3612,10 @@ i915_drop_caches_set(void *data, u64 val)
  		intel_gt_set_wedged(&i915->gt);
if (val & DROP_RETIRE)
-		i915_retire_requests(i915);
+		i915_retire_requests(i915, 0);
if (val & (DROP_IDLE | DROP_ACTIVE)) {
-		ret = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_INTERRUPTIBLE,
-					     MAX_SCHEDULE_TIMEOUT);
+		ret = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
  		if (ret)
  			return ret;
  	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b33fc7972e6b..3d1d652431be 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2314,8 +2314,7 @@ void i915_gem_driver_register(struct drm_i915_private *i915);
  void i915_gem_driver_unregister(struct drm_i915_private *i915);
  void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
  void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-			   unsigned int flags, long timeout);
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, long timeout);
  void i915_gem_suspend(struct drm_i915_private *dev_priv);
  void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
  void i915_gem_resume(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 225fd22af858..c5f1c2043f97 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -890,79 +890,19 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
  	}
  }
-static long
-wait_for_timelines(struct drm_i915_private *i915,
-		   unsigned int wait, long timeout)
-{
-	struct intel_gt_timelines *timelines = &i915->gt.timelines;
-	struct intel_timeline *tl;
-	unsigned long flags;
-
-	spin_lock_irqsave(&timelines->lock, flags);
-	list_for_each_entry(tl, &timelines->active_list, link) {
-		struct dma_fence *fence;
-
-		fence = i915_active_fence_get(&tl->last_request);
-		if (!fence)
-			continue;
-
-		spin_unlock_irqrestore(&timelines->lock, flags);
-
-		if (!dma_fence_is_i915(fence)) {
-			timeout = dma_fence_wait_timeout(fence,
-							 flags & I915_WAIT_INTERRUPTIBLE,
-							 timeout);
-		} else {
-			struct i915_request *rq = to_request(fence);
-
-			/*
-			 * "Race-to-idle".
-			 *
-			 * Switching to the kernel context is often used as
-			 * a synchronous step prior to idling, e.g. in suspend
-			 * for flushing all current operations to memory before
-			 * sleeping. These we want to complete as quickly as
-			 * possible to avoid prolonged stalls, so allow the gpu
-			 * to boost to maximum clocks.
-			 */
-			if (flags & I915_WAIT_FOR_IDLE_BOOST)
-				gen6_rps_boost(rq);
-
-			timeout = i915_request_wait(rq, flags, timeout);
-		}
-
-		dma_fence_put(fence);
-		if (timeout < 0)
-			return timeout;
-
-		/* restart after reacquiring the lock */
-		spin_lock_irqsave(&timelines->lock, flags);
-		tl = list_entry(&timelines->active_list, typeof(*tl), link);
-	}
-	spin_unlock_irqrestore(&timelines->lock, flags);
-
-	return timeout;
-}
-
-int i915_gem_wait_for_idle(struct drm_i915_private *i915,
-			   unsigned int flags, long timeout)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout)
  {
  	/* If the device is asleep, we have no requests outstanding */
  	if (!intel_gt_pm_is_awake(&i915->gt))
  		return 0;
-	do {
-		timeout = wait_for_timelines(i915, flags, timeout);
-		if (timeout < 0)
-			return timeout;
-
+	while ((timeout = i915_retire_requests(i915, timeout)) > 0) {
  		cond_resched();
  		if (signal_pending(current))
  			return -EINTR;
+	}
-	} while (i915_retire_requests(i915));
-
-	return 0;
+	return timeout;
  }
struct i915_vma *
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 44f5b638fa43..708055a3887e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -46,9 +46,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
  	 * the hopes that we can then remove contexts and the like only
  	 * bound by their active reference.
  	 */
-	return i915_gem_wait_for_idle(i915,
-				      I915_WAIT_INTERRUPTIBLE,
-				      MAX_SCHEDULE_TIMEOUT);
+	return i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
  }
static bool
@@ -126,6 +124,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
  				    min_size, alignment, cache_level,
  				    start, end, mode);
+	i915_retire_requests(vm->i915, 0);
+
  search_again:
  	active = NULL;
  	INIT_LIST_HEAD(&eviction_list);
@@ -265,13 +265,13 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
  	trace_i915_gem_evict_node(vm, target, flags);

-	/* Retire before we search the active list. Although we have
+	/*
+	 * Retire before we search the active list. Although we have
  	 * reasonable accuracy in our retirement lists, we may have
  	 * a stray pin (preventing eviction) that can only be resolved by
  	 * retiring.
  	 */
-	if (!(flags & PIN_NONBLOCK))
-		i915_retire_requests(vm->i915);
+	i915_retire_requests(vm->i915, 0);
check_color = vm->mm.color_adjust;
  	if (check_color) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 60676de059a7..2b7a4d49b2e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2524,7 +2524,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
  	struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
-		if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
+		if (i915_retire_requests(dev_priv, MAX_SCHEDULE_TIMEOUT)) {

Why couldn't this stay as i915_gem_wait_for_idle?
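
i.e. I would have expected this to simply become:

	if (i915_gem_wait_for_idle(dev_priv, MAX_SCHEDULE_TIMEOUT)) {

with the new two-argument form, rather than switching to i915_retire_requests().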

  			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
  			/* Wait a bit, in hopes it avoids the hang */
  			udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 4ecfae143276..1c5e804c9ca2 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1429,10 +1429,11 @@ long i915_request_wait(struct i915_request *rq,
  	return timeout;
  }
-bool i915_retire_requests(struct drm_i915_private *i915)
+long i915_retire_requests(struct drm_i915_private *i915, long timeout)
  {
  	struct intel_gt_timelines *timelines = &i915->gt.timelines;
  	struct intel_timeline *tl, *tn;
+	unsigned long active_count = 0;
  	unsigned long flags;
  	LIST_HEAD(free);
@@ -1446,13 +1447,27 @@ bool i915_retire_requests(struct drm_i915_private *i915)
  		tl->active_count++; /* pin the list element */
  		spin_unlock_irqrestore(&timelines->lock, flags);
+		if (timeout > 0) {
+			struct dma_fence *fence;
+
+			fence = i915_active_fence_get(&tl->last_request);
+			if (fence) {
+				timeout = dma_fence_wait_timeout(fence,
+								 true,
+								 timeout);
+				dma_fence_put(fence);
+			}
+		}
+
  		retire_requests(tl);
  		spin_lock_irqsave(&timelines->lock, flags);

  		/* Resume iteration after dropping lock */
  		list_safe_reset_next(tl, tn, link);
-		if (!--tl->active_count)
+		if (--tl->active_count)
+			active_count += !!rcu_access_pointer(tl->last_request.fence);
+		else
  			list_del(&tl->link);
mutex_unlock(&tl->mutex);
@@ -1468,7 +1483,7 @@ bool i915_retire_requests(struct drm_i915_private *i915)
  	list_for_each_entry_safe(tl, tn, &free, link)
  		__intel_timeline_free(&tl->kref);
-	return !list_empty(&timelines->active_list);
+	return active_count ? timeout : 0;
  }
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 57a2193c64d1..2a5d682aa6b1 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -310,7 +310,6 @@ long i915_request_wait(struct i915_request *rq,
  #define I915_WAIT_INTERRUPTIBLE	BIT(0)
  #define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
  #define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
static inline bool i915_request_signaled(const struct i915_request *rq)
  {
@@ -440,6 +439,6 @@ static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
  	return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
  }
-bool i915_retire_requests(struct drm_i915_private *i915);
+long i915_retire_requests(struct drm_i915_private *i915, long timeout);
#endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 2a5fbe46ea9f..ed496bd6d84f 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -18,8 +18,7 @@ int igt_flush_test(struct drm_i915_private *i915)
  	cond_resched();

-	i915_retire_requests(i915);
-	if (i915_gem_wait_for_idle(i915, 0, HZ / 5) == -ETIME) {
+	if (i915_gem_wait_for_idle(i915, HZ / 5) == -ETIME) {
  		pr_err("%pS timed out, cancelling all further testing.\n",
  		       __builtin_return_address(0));
@@ -30,7 +29,6 @@ int igt_flush_test(struct drm_i915_private *i915)
  		intel_gt_set_wedged(&i915->gt);
  		ret = -EIO;
  	}
-	i915_retire_requests(i915);
return ret;
  }
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 04a6f88fdf64..eae90f97df6c 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -23,9 +23,7 @@ int igt_live_test_begin(struct igt_live_test *t,
  	t->func = func;
  	t->name = name;
-	err = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_INTERRUPTIBLE,
-				     MAX_SCHEDULE_TIMEOUT);
+	err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
  	if (err) {
  		pr_err("%s(%s): failed to idle before, with err=%d!",
  		       func, name, err);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index f3e9b5d7d098..66cc5634db1c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -44,7 +44,7 @@ void mock_device_flush(struct drm_i915_private *i915)
  	do {
  		for_each_engine(engine, i915, id)
  			mock_engine_flush(engine);
-	} while (i915_retire_requests(i915));
+	} while (i915_retire_requests(i915, MAX_SCHEDULE_TIMEOUT));
  }
static void mock_device_release(struct drm_device *dev)

Regards,

Tvrtko