[RFC 05/10] drm/i915: Track per-context engine busyness

From: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>

Some customers want to know how much GPU time their clients are using
in order to make dynamic load-balancing decisions.

With the hooks already in place to track overall engine busyness, we
can extend them slightly to split that time between contexts. A
context's clock starts when it is submitted to ELSP port 0, or when
it is promoted there as the previous context completes, and stops
when it is switched out; readers sum the accumulated total with the
currently accruing interval under a seqlock.
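
With the intel_context_get_busy_time() helper added below, a caller
can, for example, sample a context twice and derive its share of wall
time over the interval. A minimal sketch of such a consumer; the
sampling helper and its name are illustrative only, not part of this
patch:

  /* Needs linux/delay.h, linux/ktime.h, linux/math64.h. */
  static unsigned int ce_busy_permille(struct intel_context *ce,
                                       unsigned int interval_ms)
  {
          ktime_t t0, t1;
          u64 busy_ns, wall_ns;

          /* Two samples of the accumulated busy time... */
          t0 = intel_context_get_busy_time(ce);
          msleep(interval_ms);
          t1 = intel_context_get_busy_time(ce);

          /* ...give busyness relative to wall time, in permille. */
          busy_ns = ktime_to_ns(ktime_sub(t1, t0));
          wall_ns = (u64)interval_ms * NSEC_PER_MSEC;

          return div64_u64(busy_ns * 1000, wall_ns);
  }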

v2: Fix accounting for tail updates.
v3: Rebase.
v4: Mark currently running contexts as active on stats enable.
v5: Include some headers to fix the build.
v6: Added fine grained lock.
v7: Convert to seqlock. (Chris Wilson)
v8: Rebase and tidy with helpers.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
Cc: gordon.kelly@xxxxxxxxx
---
 drivers/gpu/drm/i915/i915_gem_context.c |  1 +
 drivers/gpu/drm/i915/i915_gem_context.h | 17 +++++++
 drivers/gpu/drm/i915/intel_engine_cs.c  | 27 +++++++++++
 drivers/gpu/drm/i915/intel_lrc.c        | 62 +++++++++++++++++++++----
 4 files changed, 97 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index bff3788908e0..37cace775b31 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -283,6 +283,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 
 		ce->gem_context = ctx;
 		ce->engine = dev_priv->engine[n];
+		seqlock_init(&ce->stats.lock);
 	}
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 4d6994f311be..e3d9948f7186 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -28,6 +28,7 @@
 #include <linux/bitops.h>
 #include <linux/list.h>
 #include <linux/radix-tree.h>
+#include <linux/seqlock.h>
 
 #include "i915_gem.h"
 #include "i915_scheduler.h"
@@ -160,6 +161,13 @@ struct i915_gem_context {
 		u64 lrc_desc;
 		int pin_count;
 
+		struct intel_context_stats {
+			seqlock_t lock;
+			bool active;
+			ktime_t start;
+			ktime_t total;
+		} stats;
+
 		const struct intel_context_ops *ops;
 	} __engine[I915_NUM_ENGINES];
 
@@ -339,4 +347,13 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
 	kref_put(&ctx->ref, i915_gem_context_release);
 }
 
+static inline void
+__intel_context_stats_start(struct intel_context_stats *stats, ktime_t now)
+{
+	stats->start = now;
+	stats->active = true;
+}
+
+ktime_t intel_context_get_busy_time(struct intel_context *ce);
+
 #endif /* !__I915_GEM_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index abfde8968900..020391c8a874 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1580,6 +1580,14 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 
 		engine->stats.enabled_at = ktime_get();
 
+		/* Mark currently running context as active. */
+		if (port_isset(port)) {
+			struct i915_request *rq = port_request(port);
+
+			__intel_context_stats_start(&rq->hw_context->stats,
+						    engine->stats.enabled_at);
+		}
+
 		/* XXX submission method oblivious? */
 		while (num_ports-- && port_isset(port)) {
 			engine->stats.active++;
@@ -1653,6 +1661,25 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
 	write_sequnlock_irqrestore(&engine->stats.lock, flags);
 }
 
+ktime_t intel_context_get_busy_time(struct intel_context *ce)
+{
+	unsigned int seq;
+	ktime_t total;
+
+	do {
+		seq = read_seqbegin(&ce->stats.lock);
+
+		total = ce->stats.total;
+
+		if (ce->stats.active)
+			total = ktime_add(total,
+					  ktime_sub(ktime_get(),
+						    ce->stats.start));
+	} while (read_seqretry(&ce->stats.lock, seq));
+
+	return total;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
 #include "selftests/intel_engine_cs.c"
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3947bdcd8ea6..72e2a9065b0f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -376,18 +376,48 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
 }
 
 static inline void
-intel_engine_context_in(struct intel_engine_cs *engine)
+intel_context_stats_start(struct intel_context_stats *stats, ktime_t now)
 {
+	write_seqlock(&stats->lock);
+	__intel_context_stats_start(stats, now);
+	write_sequnlock(&stats->lock);
+}
+
+static inline void
+intel_context_stats_stop(struct intel_context_stats *stats, ktime_t now)
+{
+	write_seqlock(&stats->lock);
+	GEM_BUG_ON(!stats->start);
+	stats->total = ktime_add(stats->total, ktime_sub(now, stats->start));
+	stats->active = false;
+	write_sequnlock(&stats->lock);
+}
+
+static inline void
+intel_context_in(struct intel_context *ce, bool submit)
+{
+	struct intel_engine_cs *engine = ce->engine;
 	unsigned long flags;
+	ktime_t now;
 
 	if (READ_ONCE(engine->stats.enabled) == 0)
 		return;
 
 	write_seqlock_irqsave(&engine->stats.lock, flags);
 
+	if (submit) {
+		now = ktime_get();
+		intel_context_stats_start(&ce->stats, now);
+	} else {
+		now = 0;
+	}
+
 	if (engine->stats.enabled > 0) {
-		if (engine->stats.active++ == 0)
-			engine->stats.start = ktime_get();
+		if (engine->stats.active++ == 0) {
+			if (!now)
+				now = ktime_get();
+			engine->stats.start = now;
+		}
 		GEM_BUG_ON(engine->stats.active == 0);
 	}
 
@@ -395,8 +425,9 @@ intel_engine_context_in(struct intel_engine_cs *engine)
 }
 
 static inline void
-intel_engine_context_out(struct intel_engine_cs *engine)
+intel_context_out(struct intel_context *ce)
 {
+	struct intel_engine_cs *engine = ce->engine;
 	unsigned long flags;
 
 	if (READ_ONCE(engine->stats.enabled) == 0)
@@ -405,14 +436,25 @@ intel_engine_context_out(struct intel_engine_cs *engine)
 	write_seqlock_irqsave(&engine->stats.lock, flags);
 
 	if (engine->stats.enabled > 0) {
+		struct execlist_port *next_port = &engine->execlists.port[1];
+		ktime_t now = ktime_get();
 		ktime_t last;
 
+		intel_context_stats_stop(&ce->stats, now);
+
+		if (port_isset(next_port)) {
+			struct i915_request *next_rq = port_request(next_port);
+
+			intel_context_stats_start(&next_rq->hw_context->stats,
+						  now);
+		}
+
 		if (engine->stats.active && --engine->stats.active == 0) {
 			/*
 			 * Decrement the active context count and in case GPU
 			 * is now idle add up to the running total.
 			 */
-			last = ktime_sub(ktime_get(), engine->stats.start);
+			last = ktime_sub(now, engine->stats.start);
 
 			engine->stats.total = ktime_add(engine->stats.total,
 							last);
@@ -422,7 +464,7 @@ intel_engine_context_out(struct intel_engine_cs *engine)
 			 * the first event in which case we account from the
 			 * time stats gathering was turned on.
 			 */
-			last = ktime_sub(ktime_get(), engine->stats.enabled_at);
+			last = ktime_sub(now, engine->stats.enabled_at);
 
 			engine->stats.total = ktime_add(engine->stats.total,
 							last);
@@ -446,16 +488,16 @@ execlists_user_end(struct intel_engine_execlists *execlists)
 }
 
 static inline void
-execlists_context_schedule_in(struct i915_request *rq)
+execlists_context_schedule_in(struct i915_request *rq, unsigned int port)
 {
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
-	intel_engine_context_in(rq->engine);
+	intel_context_in(rq->hw_context, port == 0);
 }
 
 static inline void
 execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
 {
-	intel_engine_context_out(rq->engine);
+	intel_context_out(rq->hw_context);
 	execlists_context_status_change(rq, status);
 	trace_i915_request_out(rq);
 }
@@ -521,7 +563,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 		if (rq) {
 			GEM_BUG_ON(count > !n);
 			if (!count++)
-				execlists_context_schedule_in(rq);
+				execlists_context_schedule_in(rq, n);
 			port_set(&port[n], port_pack(rq, count));
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
-- 
2.17.0
