Recently discovered in commit bdae33b8b82b ("drm/i915: Use maximum
write flush for pwrite_gtt") was that we needed to use our full write
barrier before changing the GGTT PTE to ensure that our indirect
writes through the GTT landed before the PTE changed (and the writes
end up in a different page). That also applies to our GGTT relocation
path.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: stable@xxxxxxxxxxxxxxx
---
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8a2047c4e7c3..01901dad33f7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1019,11 +1019,12 @@ static void reloc_cache_reset(struct reloc_cache *cache)
 		kunmap_atomic(vaddr);
 		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
 	} else {
-		wmb();
+		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
-		if (cache->node.allocated) {
-			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
+		if (cache->node.allocated) {
 			ggtt->vm.clear_range(&ggtt->vm,
 					     cache->node.start,
 					     cache->node.size);
@@ -1078,6 +1079,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 	void *vaddr;
 
 	if (cache->vaddr) {
+		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
 	} else {
 		struct i915_vma *vma;
@@ -1119,7 +1121,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 
 	offset = cache->node.start;
 	if (cache->node.allocated) {
-		wmb();
 		ggtt->vm.insert_page(&ggtt->vm,
 				     i915_gem_object_get_dma_address(obj, page),
 				     offset, I915_CACHE_NONE, 0);
-- 
2.22.0
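
For readers unfamiliar with the hazard being closed here: writes
through the GTT aperture go via a write-combining (WC) mapping, so a
bare wmb() orders them against later CPU stores but does not
necessarily push them out of the WC buffers before the PTE is
rewritten to point at a different page. Roughly, on hardware whose
GGTT writes are not coherent, intel_gt_flush_ggtt_writes() pairs the
wmb() with an uncached mmio posting read, which forces the WC buffers
to drain. Below is a minimal sketch of that pattern, not the i915
code itself; gtt_window, uc_status_reg and update_pte are hypothetical
stand-ins for the aperture iomap, an uncached register mapping and the
PTE update (ggtt->vm.insert_page() in i915):

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical stand-ins; see the note above. */
static void __iomem *gtt_window;
static void __iomem *uc_status_reg;

static void update_pte(unsigned long pfn)
{
	/* ...retarget the GGTT PTE at the next page... */
}

static void write_reloc_then_retarget(u32 value, unsigned long next_pfn)
{
	writel(value, gtt_window);	/* may sit in the CPU's WC buffers */

	/*
	 * wmb() orders the WC write against later stores but need not
	 * flush it to memory; the uncached read drains the WC buffers,
	 * so the write lands in the page the PTE points at *now*.
	 */
	wmb();
	(void)readl(uc_status_reg);

	update_pte(next_pfn);		/* safe: old writes cannot leak here */
}

On machines whose GGTT writes are coherent the posting read is
unnecessary, which is presumably why the flush is hidden behind
intel_gt_flush_ggtt_writes() rather than open-coded at each call site.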