[PATCH 7/9] drm/i915: Set our shrinker->batch to 4096 (~16MiB)

Prefer to defer activating our GEM shrinker until we have at least a
few megabytes to free, or until enough memory pressure has accumulated
from deferring reclaim to force a shrink. The intent is that, because
our objects tend to be large, we are too effective at shrinking and are
not rewarded for freeing more pages than the batch asks for. A larger
batch also defers the initial shrink, hopefully putting us at a lower
priority than, say, the buffer cache (although this balances out over a
number of reclaim passes, with GEM being the more bursty).
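
As a rough illustration (not part of the diff below; the object sizes
here are invented), the feedback in the count callback is a moving
average that pulls shrinker->batch towards twice the mean shrinkable
object size in pages, clamped below by the default SHRINK_BATCH of 128.
A minimal user-space sketch of how that average behaves:

/*
 * Illustration only: a user-space model of the batch feedback in
 * i915_gem_shrinker_count(). The object sizes are made up.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define SHRINK_BATCH	128ul

static unsigned long update_batch(unsigned long batch,
				  const unsigned long *obj_bytes,
				  unsigned int nr)
{
	unsigned long count = 0, num_objects = 0;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		count += obj_bytes[i] >> PAGE_SHIFT;
		num_objects++;
	}

	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

		/* moving average of the old batch and 2x mean object size */
		batch = (batch + avg) >> 1;
		if (batch < SHRINK_BATCH)
			batch = SHRINK_BATCH;
	}

	return batch;
}

int main(void)
{
	/* hypothetical shrinkable objects: 4MiB, 16MiB and 64MiB */
	const unsigned long objs[] = { 4ul << 20, 16ul << 20, 64ul << 20 };
	unsigned long batch = 4096;	/* the initial seed below, ~16MiB */
	int pass;

	for (pass = 1; pass <= 4; pass++) {
		batch = update_batch(batch, objs, 3);
		printf("pass %d: batch = %lu pages (~%lu MiB)\n",
		       pass, batch, batch >> (20 - PAGE_SHIFT));
	}
	return 0;
}

With those made-up sizes the batch walks from the initial 4096 pages
towards ~14336 pages (2 * 21504 / 3), i.e. roughly two objects' worth,
and never falls below 128.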

v2: Add a feedback system to try to tune the batch size towards
an effective size for the available objects.
v3: Start keeping track of shrinker stats in debugfs (see the example
read below)
v4: Protect against finding no shrinkable objects (div-by-zero)
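
As a usage sketch for the v3 debugfs entry (assuming the GPU is DRM
minor 0; the batch shown is just the initial seed from this patch and
moves once the count callback has run):

  # cat /sys/kernel/debug/dri/0/i915_shrinker_info
  seeks = 2
  batch = 4096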

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_debugfs.c      | 11 +++++++++++
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 34 +++++++++++++++++++++++++-------
 2 files changed, 38 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 997feddb8436..6d643642e0ce 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3331,6 +3331,16 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_shrinker_info(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+
+	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
+	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
+
+	return 0;
+}
+
 static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4811,6 +4821,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_dmc_info", i915_dmc_info, 0},
 	{"i915_display_info", i915_display_info, 0},
 	{"i915_engine_info", i915_engine_info, 0},
+	{"i915_shrinker_info", i915_shrinker_info, 0},
 	{"i915_semaphore_status", i915_semaphore_status, 0},
 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 175765111f4e..bb4079f9abfd 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -295,20 +295,39 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	struct drm_i915_gem_object *obj;
+	unsigned long num_objects = 0;
 	unsigned long count = 0;
 
-	spin_lock(&dev_priv->mm.obj_lock);
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link)
-		if (can_release_pages(obj))
+	spin_lock(&i915->mm.obj_lock);
+	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
+		if (can_release_pages(obj)) {
 			count += obj->base.size >> PAGE_SHIFT;
+			num_objects++;
+		}
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link)
-		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
+	list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
+		if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
 			count += obj->base.size >> PAGE_SHIFT;
-	spin_unlock(&dev_priv->mm.obj_lock);
+			num_objects++;
+		}
+	spin_unlock(&i915->mm.obj_lock);
+
+	/* Update our preferred vmscan batch size for the next pass.
+	 * Our rough guess for an effective batch size is roughly 2
+	 * available GEM objects' worth of pages. That is, we don't want
+	 * the shrinker to fire until it is worth the cost of freeing an
+	 * entire GEM object.
+	 */
+	if (num_objects) {
+		unsigned long avg = 2 * count / num_objects;
+
+		i915->mm.shrinker.batch =
+			max((i915->mm.shrinker.batch + avg) >> 1,
+			    128ul /* default SHRINK_BATCH */);
+	}
 
 	return count;
 }
@@ -473,6 +492,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
 	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
 	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
 	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+	dev_priv->mm.shrinker.batch = 4096;
 	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
 
 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-- 
2.15.0.rc0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
