From: Thomas Hellström <thomas.hellstrom@xxxxxxxxx>

Stolen objects need to take the object lock, and we may call put_pages()
when the refcount drops to zero; make sure all calls are handled
correctly.

Idea-from: Thomas Hellström <thomas.hellstrom@xxxxxxxxx>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@xxxxxxxxxxxxxxx>
Signed-off-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.h | 13 +++++++++++++
 drivers/gpu/drm/i915/gem/i915_gem_pages.c  | 14 ++++++++++++--
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 10 +++++++++-
 3 files changed, 34 insertions(+), 3 deletions(-)
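A note for reviewers, below the cut so it stays out of the commit: the
new assert only demands obj->mm.lock while the object is still
reachable (refcount > 0). Once the refcount has dropped to zero nobody
else can look the object up, so the final put_pages may run unlocked.
A minimal sketch of the two caller patterns this distinguishes follows;
the example_* functions are invented for illustration, while the i915
helpers and locks are the ones the patch touches:

/* Normal path: the object can still be looked up, so the caller must
 * hold obj->mm.lock; assert_object_held_shared() sees refcount > 0 and
 * has lockdep verify exactly that.
 */
static void example_shrink_object(struct drm_i915_gem_object *obj)
{
	mutex_lock(&obj->mm.lock);
	__i915_gem_object_put_pages_locked(obj); /* assert checks the lock */
	mutex_unlock(&obj->mm.lock);
}

/* Teardown path: the refcount has already dropped to zero, so no other
 * thread can find the object; kref_read() == 0 turns
 * assert_object_held_shared() into a no-op and the lock is not needed.
 */
static void example_free_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(kref_read(&obj->base.refcount));
	__i915_gem_object_put_pages_locked(obj); /* assert is skipped */
}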
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index da7fd301fc8d..26ef37532f81 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -125,6 +125,19 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
 		     ((kref_read(&obj->base.refcount) == 1) && \
 		      list_empty_careful(&obj->mm.link) && \
 		      list_empty_careful(&obj->vma.list))))
+/*
+ * If more than one potential simultaneous locker, assert held.
+ */
+static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
+{
+	/*
+	 * Note mm list lookup is protected by
+	 * kref_get_unless_zero().
+	 */
+	if (IS_ENABLED(CONFIG_LOCKDEP) &&
+	    kref_read(&obj->base.refcount) > 0)
+		lockdep_assert_held(&obj->mm.lock);
+}
 
 static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
 					 struct i915_gem_ww_ctx *ww,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b03e58106516..183aae046b68 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -18,7 +18,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	unsigned long supported = INTEL_INFO(i915)->page_sizes;
 	int i;
 
-	lockdep_assert_held(&obj->mm.lock);
+	assert_object_held_shared(obj);
 
 	if (i915_gem_object_is_volatile(obj))
 		obj->mm.madv = I915_MADV_DONTNEED;
@@ -67,6 +67,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 		struct list_head *list;
 		unsigned long flags;
 
+		lockdep_assert_held(&obj->mm.lock);
 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
 
 		i915->mm.shrink_count++;
@@ -88,6 +89,8 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	int err;
 
+	assert_object_held_shared(obj);
+
 	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
 		drm_dbg(&i915->drm,
 			"Attempting to obtain a purgeable object\n");
@@ -115,6 +118,8 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
+	assert_object_held_shared(obj);
+
 	if (unlikely(!i915_gem_object_has_pages(obj))) {
 		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
@@ -142,7 +147,7 @@ void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 /* Try to discard unwanted pages */
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
 {
-	lockdep_assert_held(&obj->mm.lock);
+	assert_object_held_shared(obj);
 	GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 	if (obj->ops->writeback)
@@ -173,6 +178,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
 
+	assert_object_held_shared(obj);
+
 	pages = fetch_and_zero(&obj->mm.pages);
 	if (IS_ERR_OR_NULL(pages))
 		return pages;
@@ -200,6 +207,9 @@ int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_has_pinned_pages(obj))
 		return -EBUSY;
 
+	/* May be called by shrinker from within get_pages() (on another bo) */
+	assert_object_held_shared(obj);
+
 	i915_gem_object_release_mmap_offset(obj);
 
 	/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 5372b888ba01..ce9086d3a647 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -643,11 +643,19 @@ __i915_gem_object_create_stolen(struct intel_memory_region *mem,
 	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	i915_gem_object_set_cache_coherency(obj, cache_level);
 
+	if (WARN_ON(!i915_gem_object_trylock(obj))) {
+		err = -EBUSY;
+		goto cleanup;
+	}
+
 	err = i915_gem_object_pin_pages(obj);
-	if (err)
+	if (err) {
+		i915_gem_object_unlock(obj);
 		goto cleanup;
+	}
 
 	i915_gem_object_init_memory_region(obj, mem);
+	i915_gem_object_unlock(obj);
 
 	return obj;
 
-- 
2.26.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx