Re: [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads

On 28/09/2018 14:58, Chris Wilson wrote:
When submitting chains to each engine, we can do so (mostly) in
parallel, so delegate submission to threads on a per-engine basis.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
  drivers/gpu/drm/i915/selftests/intel_lrc.c | 73 ++++++++++++++++++----
  1 file changed, 61 insertions(+), 12 deletions(-)
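
A rough userspace analogy of the fan-out/join shape the patch adopts, with pthreads standing in for kthread_run()/kthread_stop() and every name below made up:

/* toy sketch only; build: cc -pthread fanout.c */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define NR_ENGINES	4
#define RUNTIME_SEC	1

struct worker {
	pthread_t thread;
	unsigned long count;	/* result slot, like arg[id].count */
};

static void *worker_fn(void *data)
{
	struct worker *w = data;
	time_t end = time(NULL) + RUNTIME_SEC;

	do {
		/* stand-in for building and submitting one request */
		w->count++;
	} while (time(NULL) < end);

	return NULL;
}

int main(void)
{
	struct worker workers[NR_ENGINES];
	unsigned long total = 0;
	int i;

	for (i = 0; i < NR_ENGINES; i++) {	/* like kthread_run() per engine */
		workers[i].count = 0;
		pthread_create(&workers[i].thread, NULL, worker_fn, &workers[i]);
	}

	for (i = 0; i < NR_ENGINES; i++) {	/* like kthread_stop() + summing */
		pthread_join(workers[i].thread, NULL);
		total += workers[i].count;
	}

	printf("Submitted %lu requests across %d workers\n", total, NR_ENGINES);
	return 0;
}

Each worker owns its result slot, so the parent only reads the counts after the join, just as smoke_crescendo() reads arg[id].count only after kthread_stop().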

diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 3a474bb64c05..d68a924c530e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -587,8 +587,10 @@ static int random_priority(struct rnd_state *rnd)
  struct preempt_smoke {
  	struct drm_i915_private *i915;
  	struct i915_gem_context **contexts;
+	struct intel_engine_cs *engine;
  	unsigned int ncontext;
  	struct rnd_state prng;
+	unsigned long count;
  };
static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
@@ -597,31 +599,78 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
  							  &smoke->prng)];
  }
+static int smoke_crescendo_thread(void *arg)
+{
+	struct preempt_smoke *smoke = arg;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_gem_context *ctx = smoke_context(smoke);
+		struct i915_request *rq;
+
+		mutex_lock(&smoke->i915->drm.struct_mutex);
+
+		ctx->sched.priority = count % I915_PRIORITY_MAX;
+
+		rq = i915_request_alloc(smoke->engine, ctx);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&smoke->i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+
+		mutex_unlock(&smoke->i915->drm.struct_mutex);
+
+		count++;

Very little happens outside the mutex, so I am not sure parallelization will work that well. Every thread could probably fill the ring in its timeslice? And then it blocks the others until there is space. It will heavily rely on scheduler behaviour and mutex fairness, I think.
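
To make the worry concrete, here is a toy userspace model of the shape I mean (pthreads, made-up names, nothing i915-specific): each worker does essentially all of its per-iteration work under one shared lock, so N workers make roughly single-threaded progress.

/* toy illustration only; hypothetical names, not the selftest itself */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long shared_work;

static void *worker(void *arg)
{
	unsigned long *count = arg;
	time_t end = time(NULL) + 1;

	do {
		pthread_mutex_lock(&big_lock);	/* nearly the whole iteration */
		shared_work++;			/* "alloc + add the request" */
		pthread_mutex_unlock(&big_lock);
		(*count)++;			/* the only work outside the lock */
	} while (time(NULL) < end);

	return NULL;
}

int main(void)	/* build: cc -pthread toy.c */
{
	enum { NR_WORKERS = 4 };
	pthread_t tsk[NR_WORKERS];
	unsigned long count[NR_WORKERS] = { 0 }, total = 0;
	int i;

	for (i = 0; i < NR_WORKERS; i++)
		pthread_create(&tsk[i], NULL, worker, &count[i]);
	for (i = 0; i < NR_WORKERS; i++) {
		pthread_join(tsk[i], NULL);
		total += count[i];
	}

	/* compare with NR_WORKERS = 1: the total barely improves */
	printf("%d workers: %lu iterations\n", NR_WORKERS, total);
	return 0;
}

The same shape applies here while both i915_request_alloc() and i915_request_add() sit inside struct_mutex.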

Regards,

Tvrtko

+	} while (!__igt_timeout(end_time, NULL));
+
+	smoke->count = count;
+	return 0;
+}
+
  static int smoke_crescendo(struct preempt_smoke *smoke)
  {
+	struct task_struct *tsk[I915_NUM_ENGINES] = {};
+	struct preempt_smoke arg[I915_NUM_ENGINES];
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;
  	unsigned long count;
+	int err = 0;
+
+	mutex_unlock(&smoke->i915->drm.struct_mutex);
 
-	count = 0;
  	for_each_engine(engine, smoke->i915, id) {
-		IGT_TIMEOUT(end_time);
+		arg[id] = *smoke;
+		arg[id].engine = engine;
+		arg[id].count = 0;
+
+		tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
+				      "igt/smoke:%d", id);
+		if (IS_ERR(tsk[id])) {
+			err = PTR_ERR(tsk[id]);
+			break;
+		}
+	}
 
-		do {
-			struct i915_gem_context *ctx = smoke_context(smoke);
-			struct i915_request *rq;
+	count = 0;
+	for_each_engine(engine, smoke->i915, id) {
+		int status;
 
-			ctx->sched.priority = count % I915_PRIORITY_MAX;
+		if (IS_ERR_OR_NULL(tsk[id]))
+			continue;
 
-			rq = i915_request_alloc(engine, ctx);
-			if (IS_ERR(rq))
-				return PTR_ERR(rq);
+		status = kthread_stop(tsk[id]);
+		if (status && !err)
+			err = status;
 
-			i915_request_add(rq);
-			count++;
-		} while (!__igt_timeout(end_time, NULL));
+		count += arg[id].count;
  	}
 
+	mutex_lock(&smoke->i915->drm.struct_mutex);
+
  	pr_info("Submitted %lu crescendo requests across %d engines and %d contexts\n",
  		count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
  	return 0;

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx



