Our assumption is that indirect writes via the GTT are naturally flushed when we enter runtime suspend. However, from the look of bxt in our CI, this is not true and so we must apply our trick of doing an mmio to serialise the writes. Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx> --- drivers/gpu/drm/i915/i915_gem.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 43834dee4e8d..6b9352248925 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2023,6 +2023,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) { struct drm_i915_gem_object *obj, *on; + unsigned long flags; int i; /* @@ -2063,6 +2064,17 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link)); reg->dirty = true; } + + spin_lock_irqsave(&dev_priv->uncore.lock, flags); + POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); + spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) { + if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) { + obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; + obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; + } + } } static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) -- 2.14.1 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx