Re: [PATCH v5 26/35] drm/i915: Added debugfs interface to scheduler tuning parameters

On 23/02/2016 21:06, Jesse Barnes wrote:
On 02/18/2016 06:27 AM, John.C.Harrison@xxxxxxxxx wrote:
From: John Harrison <John.C.Harrison@xxxxxxxxx>

There are various parameters within the scheduler which can be tuned
to improve performance, reduce memory footprint, etc. This change adds
support for altering these via debugfs.
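
As a purely illustrative user-space sketch of exercising one of these tunables once the patch is applied (the debugfs mount point and DRM minor number below are assumptions and will differ per system; root is needed to write debugfs):

/* Hypothetical user-space sketch: read and lower a scheduler tunable
 * exposed by this patch. Assumes debugfs is mounted at /sys/kernel/debug
 * and the device is DRM minor 0; adjust the path for your system.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/dri/0/i915_scheduler_file_queue_max";
	long long val;
	FILE *f;

	/* Read the current per-file queue limit. */
	f = fopen(path, "r");
	if (!f || fscanf(f, "%lld", &val) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("current file_queue_max: %lld\n", val);

	/* Halve the limit; the new value takes effect on the next check. */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%lld\n", val / 2);
	fclose(f);
	return 0;
}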

v2: Updated for priorities now being signed values.

v5: Squashed priority bumping entries into this patch rather than a
separate patch all of their own.

For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@xxxxxxxxx>
---
  drivers/gpu/drm/i915/i915_debugfs.c | 169 ++++++++++++++++++++++++++++++++++++
  1 file changed, 169 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b923949..7d01c07 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -39,6 +39,7 @@
  #include "intel_ringbuffer.h"
  #include <drm/i915_drm.h>
  #include "i915_drv.h"
+#include "i915_scheduler.h"
 enum {
  	ACTIVE_LIST,
@@ -1122,6 +1123,168 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
  			i915_next_seqno_get, i915_next_seqno_set,
  			"0x%llx\n");
+static int
+i915_scheduler_priority_min_get(void *data, u64 *val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	*val = (u64) scheduler->priority_level_min;
+	return 0;
+}
+
+static int
+i915_scheduler_priority_min_set(void *data, u64 val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	scheduler->priority_level_min = (int32_t) val;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_scheduler_priority_min_fops,
+			i915_scheduler_priority_min_get,
+			i915_scheduler_priority_min_set,
+			"%lld\n");
+
+static int
+i915_scheduler_priority_max_get(void *data, u64 *val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	*val = (u64) scheduler->priority_level_max;
+	return 0;
+}
+
+static int
+i915_scheduler_priority_max_set(void *data, u64 val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	scheduler->priority_level_max = (int32_t) val;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_scheduler_priority_max_fops,
+			i915_scheduler_priority_max_get,
+			i915_scheduler_priority_max_set,
+			"%lld\n");
+
+static int
+i915_scheduler_priority_bump_get(void *data, u64 *val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	*val = (u64) scheduler->priority_level_bump;
+	return 0;
+}
+
+static int
+i915_scheduler_priority_bump_set(void *data, u64 val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	scheduler->priority_level_bump = (u32) val;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_scheduler_priority_bump_fops,
+			i915_scheduler_priority_bump_get,
+			i915_scheduler_priority_bump_set,
+			"%lld\n");
+
+static int
+i915_scheduler_priority_preempt_get(void *data, u64 *val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	*val = (u64) scheduler->priority_level_preempt;
+	return 0;
+}
+
+static int
+i915_scheduler_priority_preempt_set(void *data, u64 val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	scheduler->priority_level_preempt = (u32) val;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_scheduler_priority_preempt_fops,
+			i915_scheduler_priority_preempt_get,
+			i915_scheduler_priority_preempt_set,
+			"%lld\n");
+
+static int
+i915_scheduler_min_flying_get(void *data, u64 *val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	*val = (u64) scheduler->min_flying;
+	return 0;
+}
+
+static int
+i915_scheduler_min_flying_set(void *data, u64 val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	scheduler->min_flying = (u32) val;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_scheduler_min_flying_fops,
+			i915_scheduler_min_flying_get,
+			i915_scheduler_min_flying_set,
+			"%llu\n");
+
+static int
+i915_scheduler_file_queue_max_get(void *data, u64 *val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	*val = (u64) scheduler->file_queue_max;
+	return 0;
+}
+
+static int
+i915_scheduler_file_queue_max_set(void *data, u64 val)
+{
+	struct drm_device       *dev       = data;
+	struct drm_i915_private *dev_priv  = dev->dev_private;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+
+	scheduler->file_queue_max = (u32) val;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_scheduler_file_queue_max_fops,
+			i915_scheduler_file_queue_max_get,
+			i915_scheduler_file_queue_max_set,
+			"%llu\n");
+
  static int i915_frequency_info(struct seq_file *m, void *unused)
  {
  	struct drm_info_node *node = m->private;
@@ -5424,6 +5587,12 @@ static const struct i915_debugfs_files {
  	{"i915_gem_drop_caches", &i915_drop_caches_fops},
  	{"i915_error_state", &i915_error_state_fops},
  	{"i915_next_seqno", &i915_next_seqno_fops},
+	{"i915_scheduler_priority_min", &i915_scheduler_priority_min_fops},
+	{"i915_scheduler_priority_max", &i915_scheduler_priority_max_fops},
+	{"i915_scheduler_priority_bump", &i915_scheduler_priority_bump_fops},
+	{"i915_scheduler_priority_preempt", &i915_scheduler_priority_preempt_fops},
+	{"i915_scheduler_min_flying", &i915_scheduler_min_flying_fops},
+	{"i915_scheduler_file_queue_max", &i915_scheduler_file_queue_max_fops},
  	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
  	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
  	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},

Do these need to be serialized at all?  I guess some raciness doesn't hurt too much for these guys, unless somehow an inconsistent set of values would cause a livelock in the scheduler or something.
Serialised with what? Each other or the scheduler operation? Neither should be necessary. The scheduler reads the current values whenever it tests against one of these limits, so if several of them are changed while the system is busy it doesn't really matter. They are just tuning values and best-guess numbers, not array indices or anything else that would cause a kernel panic if they were wrong.

E.g. if you set the maximum file queue depth to less than the current queue contents, that just means you won't be able to submit more work until the queue has drained - which is presumably the intended result of lowering the maximum in the first place. The queue won't leak the extra entries or get into an inconsistent state.
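To make that concrete, the kind of check involved is just an unlocked integer comparison. The sketch below is hypothetical and self-contained (the struct and function names are invented for illustration; only the file_queue_max tunable comes from the patch), but it shows why a concurrent debugfs write is benign:

/* Hypothetical, self-contained illustration - not the actual i915 code. */
#include <stdbool.h>
#include <stdint.h>

struct demo_scheduler {
	uint32_t file_queue_max;	/* tunable, updated via debugfs */
};

struct demo_file {
	uint32_t queued;		/* requests currently queued by this file */
};

static bool demo_queue_has_room(const struct demo_file *file,
				const struct demo_scheduler *sched)
{
	/*
	 * A racing debugfs write may change file_queue_max between two
	 * calls, but each call still sees one valid limit. Worst case,
	 * a request is admitted against the old limit or made to wait
	 * until the queue drains below the new one; nothing leaks and
	 * no state goes inconsistent.
	 */
	return file->queued < sched->file_queue_max;
}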



If not,
Reviewed-by: Jesse Barnes <jbarnes@xxxxxxxxxxxxxxxx>

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx



