Execbuffer submission will perform its own WW locking, so we can no
longer rely on the implicit lock taken inside
i915_gem_object_prepare_read/write(). This also makes it clear that the
GVT code would hit a lockdep splat when multiple batchbuffer shadows
need to be performed in the same instance; fix that up as well.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@xxxxxxxxxxxxxxx>
---
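Note for reviewers: the calling convention after this patch, as
exercised by the selftest conversions below. This is a sketch only;
error handling is trimmed and the "out_unlock" label is illustrative:

	i915_gem_object_lock(NULL, obj);

	/* prepare_*() no longer takes the lock, it only asserts it */
	err = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (err)
		goto out_unlock;

	/* ... kmap and touch the pages, honouring needs_clflush ... */

	/* finish_access() now only unpins the pages */
	i915_gem_object_finish_access(obj);
out_unlock:
	i915_gem_object_unlock(obj);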
 drivers/gpu/drm/i915/gem/i915_gem_domain.c    | 20 ++++++-------------
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  1 -
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  5 ++++-
 .../i915/gem/selftests/i915_gem_coherency.c   | 14 +++++++++----
 .../drm/i915/gem/selftests/i915_gem_context.c | 12 ++++++++---
 drivers/gpu/drm/i915/gt/intel_renderstate.c   |  9 +++++++--
 drivers/gpu/drm/i915/gvt/cmd_parser.c         |  9 ++++++++-
 drivers/gpu/drm/i915/i915_cmd_parser.c        | 16 ++++++++++++++-
 8 files changed, 59 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 31816bdcecaf..8c5c06963de2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -591,19 +591,17 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
 	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
-	ret = i915_gem_object_lock_interruptible(NULL, obj);
-	if (ret)
-		return ret;
+	assert_object_held(obj);
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE,
 				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -631,8 +629,6 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
 
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-err_unlock:
-	i915_gem_object_unlock(obj);
 	return ret;
 }
 
@@ -645,20 +641,18 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
 	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
-	ret = i915_gem_object_lock_interruptible(NULL, obj);
-	if (ret)
-		return ret;
+	assert_object_held(obj);
 
 	ret = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   I915_WAIT_ALL,
 				   MAX_SCHEDULE_TIMEOUT);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
-		goto err_unlock;
+		return ret;
 
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -695,7 +689,5 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
 
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-err_unlock:
-	i915_gem_object_unlock(obj);
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index c7a554ae151e..681a7afee20e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -427,7 +427,6 @@ static inline void
 i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
 {
 	i915_gem_object_unpin_pages(obj);
-	i915_gem_object_unlock(obj);
 }
 
 static inline struct intel_engine_cs *
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 69cc6d39804f..01f3318fa1fb 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -991,9 +991,10 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	unsigned long n;
 	int err;
 
+	i915_gem_object_lock(NULL, obj);
 	err = i915_gem_object_prepare_read(obj, &needs_flush);
 	if (err)
-		return err;
+		goto err_unlock;
 
 	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
 		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
@@ -1013,6 +1014,8 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	}
 
 	i915_gem_object_finish_access(obj);
+err_unlock:
+	i915_gem_object_unlock(obj);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 7bbdd5bf1cec..504dfa5061f7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -27,9 +27,10 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 	u32 *cpu;
 	int err;
 
+	i915_gem_object_lock(NULL, ctx->obj);
 	err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
 	if (err)
-		return err;
+		goto out;
 
 	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
@@ -46,7 +47,9 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
 	kunmap_atomic(map);
 	i915_gem_object_finish_access(ctx->obj);
 
-	return 0;
+out:
+	i915_gem_object_unlock(ctx->obj);
+	return err;
 }
 
 static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
@@ -57,9 +60,10 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
 	u32 *cpu;
 	int err;
 
+	i915_gem_object_lock(NULL, ctx->obj);
 	err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
 	if (err)
-		return err;
+		goto out;
 
 	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
@@ -73,7 +77,9 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
 	kunmap_atomic(map);
 	i915_gem_object_finish_access(ctx->obj);
 
-	return 0;
+out:
+	i915_gem_object_unlock(ctx->obj);
+	return err;
 }
 
 static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index d7b30daca5cb..68fe74ab0d19 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -461,9 +461,10 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 	unsigned int n, m, need_flush;
 	int err;
 
+	i915_gem_object_lock(NULL, obj);
 	err = i915_gem_object_prepare_write(obj, &need_flush);
 	if (err)
-		return err;
+		goto out;
 
 	for (n = 0; n < real_page_count(obj); n++) {
 		u32 *map;
@@ -479,7 +480,9 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 	i915_gem_object_finish_access(obj);
 	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
 	obj->write_domain = 0;
-	return 0;
+out:
+	i915_gem_object_unlock(obj);
+	return err;
 }
 
 static noinline int cpu_check(struct drm_i915_gem_object *obj,
@@ -488,9 +491,10 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
 	unsigned int n, m, needs_flush;
 	int err;
 
+	i915_gem_object_lock(NULL, obj);
 	err = i915_gem_object_prepare_read(obj, &needs_flush);
 	if (err)
-		return err;
+		goto out_unlock;
 
 	for (n = 0; n < real_page_count(obj); n++) {
 		u32 *map;
@@ -527,6 +531,8 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
 	}
 
 	i915_gem_object_finish_access(obj);
+out_unlock:
+	i915_gem_object_unlock(obj);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 5954ecc3207f..7b819535ed4e 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -196,6 +196,8 @@ int intel_renderstate_init(struct intel_renderstate *so,
 	if (err)
 		goto err_vma;
 
+	i915_gem_object_lock(NULL, so->vma->obj);
+
 	err = render_state_setup(so, engine->i915);
 	if (err)
 		goto err_unpin;
@@ -203,6 +205,7 @@ int intel_renderstate_init(struct intel_renderstate *so,
 	return 0;
 
 err_unpin:
+	i915_gem_object_unlock(so->vma->obj);
 	i915_vma_unpin(so->vma);
 err_vma:
 	i915_vma_close(so->vma);
@@ -235,16 +238,18 @@ int intel_renderstate_emit(struct intel_renderstate *so,
 		return err;
 	}
 
-	i915_vma_lock(so->vma);
 	err = i915_request_await_object(rq, so->vma->obj, false);
 	if (err == 0)
 		err = i915_vma_move_to_active(so->vma, rq, 0);
-	i915_vma_unlock(so->vma);
 
 	return err;
 }
 
 void intel_renderstate_fini(struct intel_renderstate *so)
 {
+	if (!so->vma)
+		return;
+
+	i915_gem_object_unlock(so->vma->obj);
 	i915_vma_unpin_and_release(&so->vma, 0);
 }
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 85cde10520e4..9ce04df77f6b 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1865,10 +1865,14 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 		goto err_free_bb;
 	}
 
-	ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
+	ret = i915_gem_object_lock_interruptible(NULL, bb->obj);
 	if (ret)
 		goto err_free_obj;
 
+	ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
+	if (ret)
+		goto err_unlock;
+
 	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
 	if (IS_ERR(bb->va)) {
 		ret = PTR_ERR(bb->va);
@@ -1893,6 +1897,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	if (ret)
 		goto err_unmap;
 
+	i915_gem_object_unlock(bb->obj);
 	INIT_LIST_HEAD(&bb->list);
 	list_add(&bb->list, &s->workload->shadow_bb);
 
@@ -1919,6 +1924,8 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	i915_gem_object_unpin_map(bb->obj);
 err_finish_shmem_access:
 	i915_gem_object_finish_access(bb->obj);
+err_unlock:
+	i915_gem_object_unlock(bb->obj);
 err_free_obj:
 	i915_gem_object_put(bb->obj);
 err_free_bb:
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 2ed497e7c9fd..c5b9ec5b3d25 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1136,18 +1136,31 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 	void *dst, *src;
 	int ret;
 
-	ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
+	ret = i915_gem_object_lock_interruptible(NULL, dst_obj);
 	if (ret)
 		return ERR_PTR(ret);
 
+	ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
+	if (ret) {
+		i915_gem_object_unlock(dst_obj);
+		return ERR_PTR(ret);
+	}
+
 	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
 	i915_gem_object_finish_access(dst_obj);
+	i915_gem_object_unlock(dst_obj);
+
 	if (IS_ERR(dst))
 		return dst;
 
+	ret = i915_gem_object_lock_interruptible(NULL, src_obj);
+	if (ret)
+		return ERR_PTR(ret);
+
 	ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
 	if (ret) {
 		i915_gem_object_unpin_map(dst_obj);
+		i915_gem_object_unlock(src_obj);
 		return ERR_PTR(ret);
 	}
 
@@ -1196,6 +1209,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 	}
 
 	i915_gem_object_finish_access(src_obj);
+	i915_gem_object_unlock(src_obj);
 
 	/* dst_obj is returned with vmap pinned */
 	*needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
-- 
2.24.0