[PATCH v2] drm/i915: Mark all objects as having been written to following hibernation

During hibernation, all objects will have had their page contents
written to disk and then restored upon resume. This means that every
page will be dirty and we need to treat all objects as being in the CPU
domain and require their contents to be flushed before use.

At present we only do so for those objects bound into the Global GTT;
however, we need to mark all allocated objects as being unclean.

v2: Actually restrict the clflushing of object contents to the
post-hibernate path. We always call restore_gtt_mappings, because we
cannot trust the BIOS not to scribble over the GGTT, but we can be
confident that it will not touch system pages, i.e. normal shmemfs
objects.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
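As a note for reviewers less familiar with the domain tracking, the
following is a minimal standalone sketch of the thaw-time reset the diff
performs: every object is forced into the CPU read/write domain so that
the usual domain transition flushes the restored page contents before
the GPU touches them again. The types and names here are stand-ins for
illustration only, not the driver's real drm_i915_gem_object or lists.

/* Standalone sketch with stand-in types; not the driver's structures. */
#include <stdio.h>

#define SKETCH_DOMAIN_CPU (1u << 0)
#define SKETCH_DOMAIN_GTT (1u << 1)

struct sketch_obj {
	unsigned int read_domains;
	unsigned int write_domain;
};

/* On thaw, every object is CPU-dirty: the hibernation image was written
 * back by the CPU, so force the domains to CPU and rely on the normal
 * domain change to flush caches before the GPU sees the pages again. */
static void sketch_mark_cpu_dirty(struct sketch_obj *objs, unsigned int count)
{
	for (unsigned int i = 0; i < count; i++) {
		objs[i].read_domains = SKETCH_DOMAIN_CPU;
		objs[i].write_domain = SKETCH_DOMAIN_CPU;
	}
}

int main(void)
{
	struct sketch_obj objs[] = {
		{ .read_domains = SKETCH_DOMAIN_GTT, .write_domain = 0 },
		{ .read_domains = SKETCH_DOMAIN_GTT, .write_domain = SKETCH_DOMAIN_GTT },
	};

	sketch_mark_cpu_dirty(objs, 2);

	for (unsigned int i = 0; i < 2; i++)
		printf("obj%u: read=0x%x write=0x%x\n",
		       i, objs[i].read_domains, objs[i].write_domain);

	return 0;
}

The real patch applies the equivalent marking to both mm.unbound_list
and mm.bound_list, and only in the thaw case, as explained in the
changelog above.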
 drivers/gpu/drm/i915/i915_drv.c     | 22 ++++++++++++++++------
 drivers/gpu/drm/i915/i915_gem_gtt.c | 29 ++++++++++++++++++++++-------
 drivers/gpu/drm/i915/i915_gem_gtt.h |  2 +-
 3 files changed, 39 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4013373ca293..4253ca1cceff 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -701,7 +701,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
 	return i915_drm_suspend_late(dev, false);
 }
 
-static int i915_drm_resume(struct drm_device *dev)
+static int i915_drm_resume(struct drm_device *dev, bool thaw)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -710,7 +710,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	intel_csr_ucode_resume(dev_priv);
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_restore_gtt_mappings(dev);
+	i915_gem_restore_gtt_mappings(dev, thaw);
 	mutex_unlock(&dev->struct_mutex);
 
 	i915_restore_state(dev);
@@ -867,7 +867,7 @@ int i915_resume_switcheroo(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	return i915_drm_resume(dev);
+	return i915_drm_resume(dev, false);
 }
 
 /**
@@ -1054,14 +1054,24 @@ static int i915_pm_resume_early(struct device *dev)
 	return i915_drm_resume_early(drm_dev);
 }
 
-static int i915_pm_resume(struct device *dev)
+static int __i915_pm_resume(struct device *dev, bool thaw)
 {
 	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
 
 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_resume(drm_dev);
+	return i915_drm_resume(drm_dev, thaw);
+}
+
+static int i915_pm_resume(struct device *dev)
+{
+	return __i915_pm_resume(dev, false);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+	return __i915_pm_resume(dev, true);
 }
 
 static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
@@ -1628,7 +1638,7 @@ static const struct dev_pm_ops i915_pm_ops = {
 	.freeze = i915_pm_suspend,
 	.freeze_late = i915_pm_suspend_late,
 	.thaw_early = i915_pm_resume_early,
-	.thaw = i915_pm_resume,
+	.thaw = i915_pm_thaw,
 	.poweroff = i915_pm_suspend,
 	.poweroff_late = i915_pm_poweroff_late,
 	.restore_early = i915_pm_resume_early,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6a4dfdb5ec27..9b4a55667eaf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2863,13 +2863,12 @@ out_gtt_cleanup:
 	return ret;
 }
 
-void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+void i915_gem_restore_gtt_mappings(struct drm_device *dev, bool thaw)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
-	bool flush;
 
 	i915_check_and_clear_faults(dev);
 
@@ -2877,21 +2876,37 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
 			       true);
 
+	/* Coming from a hibernation image, the pages will have
+	 * been written to by the CPU and are thus in the CPU domain.
+	 * Mark them so that we remember to clean the objects, if we
+	 * need to, before use by the GPU.
+	 */
+	if (thaw) {
+		list_for_each_entry(obj,
+				    &dev_priv->mm.unbound_list,
+				    global_list) {
+			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+		}
+	}
+
 	/* Cache flush objects bound into GGTT and rebind them. */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		flush = false;
+		if (thaw) {
+			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+		}
+
 		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			if (!vma->is_ggtt)
 				break;
 
 			WARN_ON(i915_vma_bind(vma, obj->cache_level,
 					      PIN_UPDATE));
-
-			flush = true;
 		}
 
-		if (flush)
-			i915_gem_clflush_object(obj, obj->pin_display);
+		if (obj->pin_display)
+			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
 	}
 
 	if (INTEL_INFO(dev)->gen >= 8) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 001e224c4c44..b4326fc9403a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -606,7 +606,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
 
 void i915_check_and_clear_faults(struct drm_device *dev);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+void i915_gem_restore_gtt_mappings(struct drm_device *dev, bool thaw);
 
 int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 					    struct sg_table *pages);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx