For some platforms we use the stop_machine() version of
gen8_ggtt_insert_page/gen8_ggtt_insert_entries to avoid a concurrent
GGTT access bug, but this causes a circular locking dependency warning:

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&ggtt->error_mutex);
                               lock(dma_fence_map);
                               lock(&ggtt->error_mutex);
  lock(cpu_hotplug_lock);

Fix this by calling gen8_ggtt_insert_page/gen8_ggtt_insert_entries
directly at error capture, which is safe from concurrent GGTT access
because the reset path makes sure of that.

v2: Fix rebase conflict and add a comment.

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5595
Reviewed-by: Gwan-gyeong Mun <gwan-gyeong.mun@xxxxxxxxx>
Suggested-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Nirmoy Das <nirmoy.das@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gt/intel_ggtt.c     | 10 ++++++++++
 drivers/gpu/drm/i915/gt/intel_gtt.h      |  9 +++++++++
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c |  5 ++++-
 drivers/gpu/drm/i915/i915_gpu_error.c    |  8 ++++++--
 4 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index ffff96180313..15a915bb4088 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -960,6 +960,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	if (intel_vm_no_concurrent_access_wa(i915)) {
 		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
 		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+
+		/*
+		 * Calling stop_machine() version of GGTT update function
+		 * at error capture/reset path will raise lockdep warning.
+		 * Allow calling gen8_ggtt_insert_* directly at reset path
+		 * which is safe from parallel GGTT updates.
+		 */
+		ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
+		ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;
+
 		ggtt->vm.bind_async_flags =
 			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 	}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 29fd3a9e8b2e..e639434e97fd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -306,6 +306,15 @@ struct i915_address_space {
 			       struct i915_vma_resource *vma_res,
 			       enum i915_cache_level cache_level,
 			       u32 flags);
+	void (*raw_insert_page)(struct i915_address_space *vm,
+				dma_addr_t addr,
+				u64 offset,
+				enum i915_cache_level cache_level,
+				u32 flags);
+	void (*raw_insert_entries)(struct i915_address_space *vm,
+				   struct i915_vma_resource *vma_res,
+				   enum i915_cache_level cache_level,
+				   u32 flags);
 	void (*cleanup)(struct i915_address_space *vm);
 
 	void (*foreach)(struct i915_address_space *vm,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index d2c5c9367cc4..c06e83872c34 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -493,7 +493,10 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
 	if (i915_gem_object_is_lmem(obj))
 		pte_flags |= PTE_LM;
 
-	ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+	if (ggtt->vm.raw_insert_entries)
+		ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+	else
+		ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
 }
 
 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index bff8a111424a..f9b1969ed7ed 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1104,8 +1104,12 @@ i915_vma_coredump_create(const struct intel_gt *gt,
 
 		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
 			mutex_lock(&ggtt->error_mutex);
-			ggtt->vm.insert_page(&ggtt->vm, dma, slot,
-					     I915_CACHE_NONE, 0);
+			if (ggtt->vm.raw_insert_page)
+				ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
+							 I915_CACHE_NONE, 0);
+			else
+				ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+						     I915_CACHE_NONE, 0);
 			mb();
 
 			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
-- 
2.35.1
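
A note on the pattern for readers skimming the diff: affected platforms keep
the stop_machine()-based functions as the default vm.insert_page/
vm.insert_entries and additionally publish "raw" variants; lockdep-sensitive
callers (error capture, uC firmware binding) use the raw hook when it is set
and fall back to the normal one otherwise. Below is a minimal, self-contained
userspace C sketch of that optional-hook-with-fallback dispatch, illustrative
only: fake_vm and the insert_page_* helpers are made-up stand-ins, and only
the raw_insert_page/insert_page field names mirror the patch.

#include <stdint.h>
#include <stdio.h>

struct fake_vm {
	/* Default hook; may take heavy locks (stop_machine()) on some platforms. */
	void (*insert_page)(struct fake_vm *vm, uint64_t addr, uint64_t offset);
	/* Optional raw hook; set only where a direct update is safe at reset. */
	void (*raw_insert_page)(struct fake_vm *vm, uint64_t addr, uint64_t offset);
};

static void insert_page_locked(struct fake_vm *vm, uint64_t addr, uint64_t offset)
{
	(void)vm;
	printf("locked insert: %#llx -> GGTT offset %#llx\n",
	       (unsigned long long)addr, (unsigned long long)offset);
}

static void insert_page_direct(struct fake_vm *vm, uint64_t addr, uint64_t offset)
{
	(void)vm;
	printf("direct insert: %#llx -> GGTT offset %#llx\n",
	       (unsigned long long)addr, (unsigned long long)offset);
}

/* Error-capture-style caller: prefer the raw hook to avoid lock inversion. */
static void capture_insert_page(struct fake_vm *vm, uint64_t addr, uint64_t offset)
{
	if (vm->raw_insert_page)
		vm->raw_insert_page(vm, addr, offset);
	else
		vm->insert_page(vm, addr, offset);
}

int main(void)
{
	struct fake_vm vm = { .insert_page = insert_page_locked };

	capture_insert_page(&vm, 0xdead000, 0x0);    /* no raw hook: falls back */

	vm.raw_insert_page = insert_page_direct;     /* platform needs the WA */
	capture_insert_page(&vm, 0xbeef000, 0x1000); /* uses the raw variant */
	return 0;
}

This mirrors why the diff touches both call sites: the choice between the two
variants is made per call site at run time, so platforms that never set the
raw hooks are unaffected.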