[PATCH 33/40] drm/i915: Added scheduler statistic reporting to debugfs

From: John Harrison <John.C.Harrison@xxxxxxxxx>

It is useful to know what the scheduler is doing, both for debugging
and for performance analysis. This change adds a set of counters
that track various scheduler operations (batches submitted,
completed, flush requests, etc.). The data can then be read from
userland via the debugfs mechanism.
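
Once applied, the counters appear as a single table with one column
per ring. Purely for illustration (the debugfs path typically depends
on the card number, and all values below are invented), reading the
file looks something like:

  # cat /sys/kernel/debug/dri/0/i915_scheduler_info
  Ring name:             render ring   bsd ring
    Ring seqno                    42         17

  Batch submissions:
    Queued                       123         45
    Submitted                    120         44
    Completed                    118         44
    Expired                      118         43
  ...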

v2: Updated to match changes to scheduler implementation.

v3: Updated for changes to kill code and flush code.

Change-Id: I3266c631cd70c9eeb2c235f88f493e60462f85d7
For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_debugfs.c        | 77 +++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 11 +++-
 drivers/gpu/drm/i915/i915_scheduler.c      | 86 ++++++++++++++++++++++++++++---
 drivers/gpu/drm/i915/i915_scheduler.h      | 36 +++++++++++++
 4 files changed, 201 insertions(+), 9 deletions(-)

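A note on the debugfs code below: the PRINT_VAR() macro emits one row
of the stats table per invocation. As a rough hand-expanded sketch
(not the exact preprocessor output), PRINT_VAR("  Queued", "u",
stats[r].queued) turns into:

	/* Left-justify the row label into a 22-character field. */
	sprintf(str, "%-22s", "  Queued");
	ptr = str + strlen(str);
	/* Append one 10-wide column per ring; r indexes stats[]. */
	for_each_ring(ring, dev_priv, r) {
		sprintf(ptr, " %10u", stats[r].queued);
		ptr += strlen(ptr);
	}
	seq_printf(m, "%s\n", str);
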
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8f1c10c..9e7d67d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3603,6 +3603,82 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_scheduler_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+	struct i915_scheduler_stats *stats = scheduler->stats;
+	struct i915_scheduler_stats_nodes node_stats[I915_NUM_RINGS];
+	struct intel_engine_cs *ring;
+	char   str[50 * (I915_NUM_RINGS + 1)], name[50], *ptr;
+	int ret, i, r;
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (ret)
+		return ret;
+
+#define PRINT_VAR(name, fmt, var)					\
+	do {								\
+		sprintf(str, "%-22s", name);				\
+		ptr = str + strlen(str);				\
+		for_each_ring(ring, dev_priv, r) {			\
+			sprintf(ptr, " %10" fmt, var);			\
+			ptr += strlen(ptr);				\
+		}							\
+		seq_printf(m, "%s\n", str);				\
+	} while (0)
+
+	PRINT_VAR("Ring name:",             "s", dev_priv->ring[r].name);
+	PRINT_VAR("  Ring seqno",           "d", ring->get_seqno(ring, false));
+	seq_putc(m, '\n');
+
+	seq_puts(m, "Batch submissions:\n");
+	PRINT_VAR("  Queued",               "u", stats[r].queued);
+	PRINT_VAR("  Submitted",            "u", stats[r].submitted);
+	PRINT_VAR("  Completed",            "u", stats[r].completed);
+	PRINT_VAR("  Expired",              "u", stats[r].expired);
+	seq_putc(m, '\n');
+
+	seq_puts(m, "Flush counts:\n");
+	PRINT_VAR("  By object",            "u", stats[r].flush_obj);
+	PRINT_VAR("  By request",           "u", stats[r].flush_req);
+	PRINT_VAR("  By stamp",             "u", stats[r].flush_stamp);
+	PRINT_VAR("  Blanket",              "u", stats[r].flush_all);
+	PRINT_VAR("  Entries bumped",       "u", stats[r].flush_bump);
+	PRINT_VAR("  Entries submitted",    "u", stats[r].flush_submit);
+	seq_putc(m, '\n');
+
+	seq_puts(m, "Miscellaneous:\n");
+	PRINT_VAR("  ExecEarly retry",      "u", stats[r].exec_early);
+	PRINT_VAR("  ExecFinal requeue",    "u", stats[r].exec_again);
+	PRINT_VAR("  ExecFinal killed",     "u", stats[r].exec_dead);
+	PRINT_VAR("  Fence wait",           "u", stats[r].fence_wait);
+	PRINT_VAR("  Fence wait again",     "u", stats[r].fence_again);
+	PRINT_VAR("  Fence wait ignore",    "u", stats[r].fence_ignore);
+	PRINT_VAR("  Fence supplied",       "u", stats[r].fence_got);
+	PRINT_VAR("  Hung flying",          "u", stats[r].kill_flying);
+	PRINT_VAR("  Hung queued",          "u", stats[r].kill_queued);
+	seq_putc(m, '\n');
+
+	seq_puts(m, "Queue contents:\n");
+	for_each_ring(ring, dev_priv, i)
+		i915_scheduler_query_stats(ring, node_stats + ring->id);
+
+	for (i = 0; i < (i915_sqs_MAX + 1); i++) {
+		sprintf(name, "  %s", i915_scheduler_queue_status_str(i));
+		PRINT_VAR(name, "d", node_stats[r].counts[i]);
+	}
+	seq_putc(m, '\n');
+
+#undef PRINT_VAR
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
 struct pipe_crc_info {
 	const char *name;
 	struct drm_device *dev;
@@ -5571,6 +5647,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_semaphore_status", i915_semaphore_status, 0},
 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
+	{"i915_scheduler_info", i915_scheduler_info, 0},
 	{"i915_wa_registers", i915_wa_registers, 0},
 	{"i915_ddb_info", i915_ddb_info, 0},
 	{"i915_sseu_status", i915_sseu_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bf9d804..c40ceea 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1423,13 +1423,20 @@ static int i915_early_fence_wait(struct intel_engine_cs *ring, int fence_fd)
 	}
 
 	if (!sync_fence_is_signaled(fence)) {
+		struct drm_i915_private *dev_priv = ring->dev->dev_private;
+		struct i915_scheduler *scheduler = dev_priv->scheduler;
+
 		/*
 		 * Wait forever for the fence to be signalled. This is safe
		 * because the mutex lock has not yet been acquired and
 		 * the wait is interruptible.
 		 */
-		if (!i915_safe_to_ignore_fence(ring, fence))
+		if (i915_safe_to_ignore_fence(ring, fence))
+			scheduler->stats[ring->id].fence_ignore++;
+		else {
+			scheduler->stats[ring->id].fence_wait++;
 			ret = sync_fence_wait(fence, -1);
+		}
 	}
 
 	sync_fence_put(fence);
@@ -1849,6 +1856,8 @@ err:
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
+	dev_priv->scheduler->stats[ring->id].exec_early++;
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index be2430d..8c7a9e6 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -121,6 +121,9 @@ const char *i915_scheduler_queue_status_str(
 	case i915_sqs_dead:
 	return "Dead";
 
+	case i915_sqs_MAX:
+	return "Invalid";
+
 	default:
 	break;
 	}
@@ -264,9 +267,14 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 
 	BUG_ON(!scheduler);
 
+	if (qe->params.fence_wait)
+		scheduler->stats[ring->id].fence_got++;
+
 	if (i915.scheduler_override & i915_so_direct_submit) {
 		int ret;
 
+		scheduler->stats[qe->params.ring->id].queued++;
+
 		trace_i915_scheduler_queue(qe->params.ring, qe);
 
 		WARN_ON(qe->params.fence_wait &&
@@ -276,6 +284,7 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 
 		scheduler->flags[qe->params.ring->id] |= i915_sf_submitting;
 		ret = dev_priv->gt.execbuf_final(&qe->params);
+		scheduler->stats[qe->params.ring->id].submitted++;
 		scheduler->flags[qe->params.ring->id] &= ~i915_sf_submitting;
 
 		/*
@@ -309,6 +318,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 		if (qe->params.fence_wait)
 			sync_fence_put(qe->params.fence_wait);
 
+		scheduler->stats[qe->params.ring->id].expired++;
+
 		return 0;
 	}
 
@@ -391,6 +402,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 		not_flying = i915_scheduler_count_flying(scheduler, ring) <
 							 scheduler->min_flying;
 
+	scheduler->stats[ring->id].queued++;
+
 	trace_i915_scheduler_queue(ring, node);
 	trace_i915_scheduler_node_state_change(ring, node);
 
@@ -495,13 +508,17 @@ static void i915_scheduler_node_requeue(struct i915_scheduler_queue_entry *node)
 
 /* Give up on a node completely. For example, because it is causing the
  * ring to hang or is using some resource that no longer exists. */
-static void i915_scheduler_node_kill(struct i915_scheduler_queue_entry *node)
+static void i915_scheduler_node_kill(struct i915_scheduler *scheduler,
+				     struct i915_scheduler_queue_entry *node)
 {
 	BUG_ON(!node);
 	BUG_ON(I915_SQS_IS_COMPLETE(node));
 
-	if (I915_SQS_IS_FLYING(node))
+	if (I915_SQS_IS_FLYING(node)) {
 		trace_i915_scheduler_unfly(node->params.ring, node);
+		scheduler->stats[node->params.ring->id].kill_flying++;
+	} else
+		scheduler->stats[node->params.ring->id].kill_queued++;
 
 	node->status = i915_sqs_dead;
 	trace_i915_scheduler_node_state_change(node->params.ring, node);
@@ -534,10 +551,13 @@ bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
 	WARN_ON(!I915_SQS_IS_FLYING(node));
 
 	/* Node was in flight so mark it as complete. */
-	if (req->cancelled)
+	if (req->cancelled) {
 		node->status = i915_sqs_dead;
-	else
+		scheduler->stats[req->ring->id].kill_flying++;
+	} else {
 		node->status = i915_sqs_complete;
+		scheduler->stats[req->ring->id].completed++;
+	}
 
 	trace_i915_scheduler_node_state_change(req->ring, node);
 
@@ -665,6 +685,7 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
 
 		list_del(&node->link);
 		list_add(&node->link, &remove);
+		scheduler->stats[ring->id].expired++;
 
 		/* Strip the dependency info while the mutex is still locked */
 		i915_scheduler_remove_dependent(scheduler, node);
@@ -907,6 +928,35 @@ static int i915_scheduler_dump_locked(struct intel_engine_cs *ring, const char *
 	return 0;
 }
 
+int i915_scheduler_query_stats(struct intel_engine_cs *ring,
+			       struct i915_scheduler_stats_nodes *stats)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+	struct i915_scheduler_queue_entry  *node;
+	unsigned long   flags;
+
+	memset(stats, 0x00, sizeof(*stats));
+
+	spin_lock_irqsave(&scheduler->lock, flags);
+
+	list_for_each_entry(node, &scheduler->node_queue[ring->id], link) {
+		if (node->status >= i915_sqs_MAX) {
+			DRM_DEBUG_DRIVER("Invalid node state: %d! [uniq = %d, seqno = %d]\n",
+					 node->status, node->params.request->uniq, node->params.request->seqno);
+
+			stats->counts[i915_sqs_MAX]++;
+			continue;
+		}
+
+		stats->counts[node->status]++;
+	}
+
+	spin_unlock_irqrestore(&scheduler->lock, flags);
+
+	return 0;
+}
+
 int i915_scheduler_flush_stamp(struct intel_engine_cs *ring,
 			       unsigned long target,
 			       bool is_locked)
@@ -933,6 +983,7 @@ int i915_scheduler_flush_stamp(struct intel_engine_cs *ring,
 	}
 
 	spin_lock_irqsave(&scheduler->lock, flags);
+	scheduler->stats[ring->id].flush_stamp++;
 	i915_scheduler_priority_bump_clear(scheduler);
 	list_for_each_entry(node, &scheduler->node_queue[ring->id], link) {
 		if (!I915_SQS_IS_QUEUED(node))
@@ -943,12 +994,15 @@ int i915_scheduler_flush_stamp(struct intel_engine_cs *ring,
 
 		flush_count = i915_scheduler_priority_bump(scheduler,
 					node, scheduler->priority_level_max);
+		scheduler->stats[ring->id].flush_bump += flush_count;
 	}
 	spin_unlock_irqrestore(&scheduler->lock, flags);
 
 	if (flush_count) {
 		DRM_DEBUG_DRIVER("<%s> Bumped %d entries\n", ring->name, flush_count);
 		flush_count = i915_scheduler_submit_max_priority(ring, is_locked);
+		if (flush_count > 0)
+			scheduler->stats[ring->id].flush_submit += flush_count;
 	}
 
 	return flush_count;
@@ -975,6 +1029,8 @@ int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked)
 
 	BUG_ON(is_locked && (scheduler->flags[ring->id] & i915_sf_submitting));
 
+	scheduler->stats[ring->id].flush_all++;
+
 	do {
 		found = false;
 		spin_lock_irqsave(&scheduler->lock, flags);
@@ -989,6 +1045,7 @@ int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked)
 
 		if (found) {
 			ret = i915_scheduler_submit(ring, is_locked);
+			scheduler->stats[ring->id].flush_submit++;
 			if (ret < 0)
 				return ret;
 
@@ -1126,15 +1183,20 @@ static void i915_scheduler_wait_fence_signaled(struct sync_fence *fence,
 static bool i915_scheduler_async_fence_wait(struct drm_device *dev,
 					    struct i915_scheduler_queue_entry *node)
 {
+	struct drm_i915_private         *dev_priv = node->params.ring->dev->dev_private;
+	struct i915_scheduler           *scheduler = dev_priv->scheduler;
 	struct i915_sync_fence_waiter	*fence_waiter;
 	struct sync_fence		*fence = node->params.fence_wait;
 	int				signaled;
 	bool				success = true;
 
-	if ((node->flags & i915_qef_fence_waiting) == 0)
+	if ((node->flags & i915_qef_fence_waiting) == 0) {
 		node->flags |= i915_qef_fence_waiting;
-	else
+		scheduler->stats[node->params.ring->id].fence_wait++;
+	} else {
+		scheduler->stats[node->params.ring->id].fence_again++;
 		return true;
+	}
 
 	if (fence == NULL)
 		return false;
@@ -1199,8 +1261,11 @@ static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
 		else
 			signalled = true;
 
-		if (!signalled)
+		if (!signalled) {
 			signalled = i915_safe_to_ignore_fence(ring, node->params.fence_wait);
+			if (signalled)
+				scheduler->stats[node->params.ring->id].fence_ignore++;
+		}
 
 		has_local  = false;
 		has_remote = false;
@@ -1337,6 +1401,8 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
 		 * list. So add it back in and mark it as in flight. */
 		i915_scheduler_fly_node(node);
 
+		scheduler->stats[ring->id].submitted++;
+
 		scheduler->flags[ring->id] |= i915_sf_submitting;
 		spin_unlock_irqrestore(&scheduler->lock, flags);
 		ret = dev_priv->gt.execbuf_final(&node->params);
@@ -1355,6 +1421,7 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
 			case ENOENT:
 				/* Fatal errors. Kill the node. */
 				requeue = -1;
+				scheduler->stats[ring->id].exec_dead++;
 			break;
 
 			case EAGAIN:
@@ -1364,12 +1431,14 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
 			case ERESTARTSYS:
 			case EINTR:
 				/* Supposedly recoverable errors. */
+				scheduler->stats[ring->id].exec_again++;
 			break;
 
 			default:
 				DRM_DEBUG_DRIVER("<%s> Got unexpected error from execfinal(): %d!\n",
 						 ring->name, ret);
 				/* Assume it is recoverable and hope for the best. */
+				scheduler->stats[ring->id].exec_again++;
 			break;
 			}
 
@@ -1385,7 +1454,7 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
 				 * later. */
 				break;
 			} else if (requeue == -1)
-				i915_scheduler_node_kill(node);
+				i915_scheduler_node_kill(scheduler, node);
 		}
 
 		/* Keep launching until the sky is sufficiently full. */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 1bf2fa6..9396ab4 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -76,6 +76,37 @@ struct i915_scheduler_queue_entry {
 };
 const char *i915_qe_state_str(struct i915_scheduler_queue_entry *node);
 
+struct i915_scheduler_stats_nodes {
+	uint32_t	counts[i915_sqs_MAX + 1];
+};
+
+struct i915_scheduler_stats {
+	/* Batch buffer counts: */
+	uint32_t            queued;
+	uint32_t            submitted;
+	uint32_t            completed;
+	uint32_t            expired;
+
+	/* Other stuff: */
+	uint32_t            flush_obj;
+	uint32_t            flush_req;
+	uint32_t            flush_stamp;
+	uint32_t            flush_all;
+	uint32_t            flush_bump;
+	uint32_t            flush_submit;
+
+	uint32_t            exec_early;
+	uint32_t            exec_again;
+	uint32_t            exec_dead;
+	uint32_t            kill_flying;
+	uint32_t            kill_queued;
+
+	uint32_t            fence_wait;
+	uint32_t            fence_again;
+	uint32_t            fence_ignore;
+	uint32_t            fence_got;
+};
+
 struct i915_scheduler {
 	struct list_head    node_queue[I915_NUM_RINGS];
 	uint32_t            flags[I915_NUM_RINGS];
@@ -87,6 +118,9 @@ struct i915_scheduler {
 	int32_t             priority_level_preempt;
 	uint32_t            min_flying;
 	uint32_t            file_queue_max;
+
+	/* Statistics: */
+	struct i915_scheduler_stats     stats[I915_NUM_RINGS];
 };
 
 /* Flag bits for i915_scheduler::flags */
@@ -125,6 +159,8 @@ int         i915_scheduler_dump(struct intel_engine_cs *ring,
 int         i915_scheduler_dump_all(struct drm_device *dev, const char *msg);
 bool        i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
 					      bool *completed, bool *busy);
+int         i915_scheduler_query_stats(struct intel_engine_cs *ring,
+				       struct i915_scheduler_stats_nodes *stats);
 bool        i915_scheduler_file_queue_is_full(struct drm_file *file);
 
 #endif  /* _I915_SCHEDULER_H_ */
-- 
1.9.1
