From: Maarten Lankhorst <maarten.lankhorst@xxxxxxxxxxxxxxx>

Currently we have a lot of places that hold the gem object lock but
haven't yet been converted to the ww dance. Complain loudly about those
places.

i915_vma_pin() shouldn't be called with the obj lock held, so that we
can still do a ww dance, while i915_vma_pin_ww() should be.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@xxxxxxxxxxxxxxx>
Cc: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/gt/intel_renderstate.c |  2 +-
 drivers/gpu/drm/i915/gt/intel_timeline.c    |  4 +-
 drivers/gpu/drm/i915/i915_vma.c             | 46 +++++++++++++++++++--
 drivers/gpu/drm/i915/i915_vma.h             |  5 +++
 4 files changed, 50 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index ea2a77c7b469..a68e5c23a67c 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -196,7 +196,7 @@ int intel_renderstate_init(struct intel_renderstate *so,
 	if (err)
 		goto err_context;
 
-	err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	err = i915_vma_pin_ww(so->vma, &so->ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
 	if (err)
 		goto err_context;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 479eb5440bc6..b2d04717db20 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -46,7 +46,7 @@ static int __hwsp_alloc(struct intel_gt *gt, struct intel_timeline_hwsp *hwsp)
 		goto out_unlock;
 	}
 
-	/* Pin early so we can call i915_ggtt_pin unlocked. */
+	/* Pin early so we can call i915_ggtt_pin_unlocked(). */
 	hwsp->vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 	if (IS_ERR(hwsp->vaddr)) {
 		ret = PTR_ERR(hwsp->vaddr);
@@ -514,7 +514,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 		goto err_rollback;
 	}
 
-	err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
+	err = i915_ggtt_pin_unlocked(vma, 0, PIN_HIGH);
 	if (err) {
 		__idle_hwsp_free(vma->private, cacheline);
 		goto err_rollback;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 8e8c80ccbe32..e07621825da9 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -862,7 +862,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	unsigned int bound;
 	int err;
 
-	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && debug_locks) {
+#ifdef CONFIG_PROVE_LOCKING
+	if (debug_locks) {
 		bool pinned_bind_wo_alloc = vma->obj &&
 			i915_gem_object_has_pinned_pages(vma->obj) &&
 			!vma->vm->allocate_va_range;
@@ -870,7 +871,10 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 		if (lockdep_is_held(&vma->vm->i915->drm.struct_mutex) &&
 		    !pinned_bind_wo_alloc)
 			WARN_ON(!ww);
+		if (ww && vma->resv)
+			assert_vma_held(vma);
 	}
+#endif
 
 	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
 	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
@@ -1017,8 +1021,8 @@ static void flush_idle_contexts(struct intel_gt *gt)
 	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
 }
 
-int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
-		  u32 align, unsigned int flags)
+static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+			   u32 align, unsigned int flags, bool unlocked)
 {
 	struct i915_address_space *vm = vma->vm;
 	int err;
@@ -1026,7 +1030,10 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 
 	do {
-		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
+		if (ww || unlocked)
+			err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
+		else
+			err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
 		if (err != -ENOSPC) {
 			if (!err) {
 				err = i915_vma_wait_for_bind(vma);
@@ -1045,6 +1052,37 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	} while (1);
 }
 
+int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+		  u32 align, unsigned int flags)
+{
+#ifdef CONFIG_LOCKDEP
+	WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv));
+#endif
+
+	return __i915_ggtt_pin(vma, ww, align, flags, false);
+}
+
+/**
+ * i915_ggtt_pin_unlocked - Pin a vma to ggtt without the underlying
+ * object's dma-resv held, but with object pages pinned.
+ *
+ * @vma: The vma to pin.
+ * @align: ggtt alignment.
+ * @flags: Pinning flags
+ *
+ * RETURN: Zero on success, negative error code on error.
+ *
+ * This function relies on the fact that object pages are already pinned,
+ * and that ggtt pinning doesn't require any page table page allocations,
+ * to pin a vma without the dma_resv lock and ww acquire context.
+ */
+int i915_ggtt_pin_unlocked(struct i915_vma *vma, u32 align, unsigned int flags)
+{
+	if (IS_ENABLED(CONFIG_LOCKDEP))
+		WARN_ON(vma->obj && !i915_gem_object_has_pinned_pages(vma->obj));
+	return __i915_ggtt_pin(vma, NULL, align, flags, true);
+}
+
 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
 {
 	/*
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 5b3a3c653454..22387a361999 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -243,12 +243,17 @@ i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 static inline int __must_check
 i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
+#ifdef CONFIG_LOCKDEP
+	WARN_ON_ONCE(vma->resv && dma_resv_held(vma->resv));
+#endif
 	return i915_vma_pin_ww(vma, NULL, size, alignment, flags);
 }
 
 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 		  u32 align, unsigned int flags);
 
+int i915_ggtt_pin_unlocked(struct i915_vma *vma, u32 align, unsigned int flags);
+
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
 	return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
-- 
2.26.2
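
For context, the ww dance that callers of i915_vma_pin_ww() are expected to
perform looks roughly like the sketch below. This is only an illustrative
sketch, not part of the patch; it assumes the existing i915_gem_ww_ctx
helpers (i915_gem_ww_ctx_init/backoff/fini) and the ww-aware
i915_gem_object_lock(), with obj/vma being a caller-provided object/vma pair.

	struct i915_gem_ww_ctx ww;
	int err;

	/* Start a ww acquire context (interruptible). */
	i915_gem_ww_ctx_init(&ww, true);
retry:
	/* Take the object's dma-resv lock under the ww context. */
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		/* Lock is held: must use the _ww variant, not i915_vma_pin(). */
		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	if (err == -EDEADLK) {
		/* Deadlock detected: drop all held locks and retry. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

Plain i915_vma_pin(), by contrast, is meant to be called without the object
lock held, so the pin code remains free to do the ww dance itself; that is
the contract the new lockdep annotations complain about when violated.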