The intent is to be able to update the mm.lists from inside an irqsoff
section (e.g. from a softirq rcu workqueue), ergo we need to make the
mm.obj_lock irqsafe.

Fixes: 3b4fa9640ccd ("drm/i915: Track the purgeable objects on a separate eviction list")
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
Cc: Matthew Auld <matthew.william.auld@xxxxxxxxx>
---
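
For reference, every hunk below is the same mechanical conversion from
spin_lock()/spin_unlock() to the irq-safe variants. A minimal sketch of
the pattern, using a hypothetical example_lock and example_bump() helper
purely for illustration (they are not part of this patch):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock); /* stand-in for i915->mm.obj_lock */

	static void example_bump(struct list_head *elem, struct list_head *head)
	{
		unsigned long flags;

		/*
		 * spin_lock_irqsave() disables interrupts on the local CPU
		 * and saves their previous state in flags, so the helper is
		 * safe to call from process, softirq or hardirq context
		 * without deadlocking against an interrupt handler that
		 * takes the same lock.
		 */
		spin_lock_irqsave(&example_lock, flags);
		list_move_tail(elem, head);
		spin_unlock_irqrestore(&example_lock, flags);
	}
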
 drivers/gpu/drm/i915/gem/i915_gem_domain.c   |  8 ++++++--
 drivers/gpu/drm/i915/gem/i915_gem_object.c   | 12 ++++++++----
 drivers/gpu/drm/i915/gem/i915_gem_pages.c    | 13 +++++++++----
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 14 ++++++++------
 drivers/gpu/drm/i915/i915_gem.c              |  8 ++++++--
 drivers/gpu/drm/i915/i915_vma.c              |  7 +++++--
 6 files changed, 42 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 6115109a2810..bd180ef46aeb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -476,10 +476,14 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	mutex_unlock(&i915->ggtt.vm.mutex);
 
 	if (i915_gem_object_is_shrinkable(obj)) {
-		spin_lock(&i915->mm.obj_lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
 		if (obj->mm.madv == I915_MADV_WILLNEED)
 			list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-		spin_unlock(&i915->mm.obj_lock);
+
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 7a07e726ec83..03725ca42cc7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -207,9 +207,11 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		 */
 		if (i915_gem_object_has_pages(obj) &&
 		    i915_gem_object_is_shrinkable(obj)) {
-			spin_lock(&i915->mm.obj_lock);
+			unsigned long flags;
+
+			spin_lock_irqsave(&i915->mm.obj_lock, flags);
 			list_del_init(&obj->mm.link);
-			spin_unlock(&i915->mm.obj_lock);
+			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 		}
 
 		mutex_unlock(&i915->drm.struct_mutex);
@@ -331,9 +333,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	if (i915_gem_object_has_pages(obj) &&
 	    i915_gem_object_is_shrinkable(obj)) {
-		spin_lock(&i915->mm.obj_lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
 		list_move_tail(&obj->mm.link, &i915->mm.purge_list);
-		spin_unlock(&i915->mm.obj_lock);
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 7868dd48d931..b36ad269f4ea 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -58,8 +58,9 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
 	if (i915_gem_object_is_shrinkable(obj)) {
 		struct list_head *list;
+		unsigned long flags;
 
-		spin_lock(&i915->mm.obj_lock);
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
 		i915->mm.shrink_count++;
 		i915->mm.shrink_memory += obj->base.size;
 
@@ -70,7 +71,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 			list = &i915->mm.shrink_list;
 		list_add_tail(&obj->mm.link, list);
 
-		spin_unlock(&i915->mm.obj_lock);
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 }
 
@@ -160,11 +161,15 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 		return pages;
 
 	if (i915_gem_object_is_shrinkable(obj)) {
-		spin_lock(&i915->mm.obj_lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
 		list_del(&obj->mm.link);
 		i915->mm.shrink_count--;
 		i915->mm.shrink_memory -= obj->base.size;
-		spin_unlock(&i915->mm.obj_lock);
+
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 
 	if (obj->mm.mapping) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 1e7f48db7b3e..88e63afd1d3d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -209,6 +209,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	for (phase = phases; phase->list; phase++) {
 		struct list_head still_in_list;
 		struct drm_i915_gem_object *obj;
+		unsigned long flags;
 
 		if ((flags & phase->bit) == 0)
 			continue;
@@ -222,7 +223,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 		 * to be able to shrink their pages, so they remain on
 		 * the unbound/bound list until actually freed.
 		 */
-		spin_lock(&i915->mm.obj_lock);
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
 		while (count < target &&
 		       (obj = list_first_entry_or_null(phase->list,
 						       typeof(*obj),
@@ -245,7 +246,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 			if (!can_release_pages(obj))
 				continue;
 
-			spin_unlock(&i915->mm.obj_lock);
+			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
 			if (unsafe_drop_pages(obj)) {
 				/* May arrive from get_pages on another bo */
@@ -259,10 +260,10 @@ i915_gem_shrink(struct drm_i915_private *i915,
 			}
 			scanned += obj->base.size >> PAGE_SHIFT;
 
-			spin_lock(&i915->mm.obj_lock);
+			spin_lock_irqsave(&i915->mm.obj_lock, flags);
 		}
 		list_splice_tail(&still_in_list, phase->list);
-		spin_unlock(&i915->mm.obj_lock);
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 
 	if (flags & I915_SHRINK_BOUND)
@@ -381,6 +382,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	struct drm_i915_gem_object *obj;
 	unsigned long unevictable, available, freed_pages;
 	intel_wakeref_t wakeref;
+	unsigned long flags;
 
 	freed_pages = 0;
 	with_intel_runtime_pm(i915, wakeref)
@@ -394,14 +396,14 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	 * being pointed to by hardware.
 	 */
 	available = unevictable = 0;
-	spin_lock(&i915->mm.obj_lock);
+	spin_lock_irqsave(&i915->mm.obj_lock, flags);
 	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
 			available += obj->base.size >> PAGE_SHIFT;
 	}
-	spin_unlock(&i915->mm.obj_lock);
+	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
 	if (freed_pages || available)
 		pr_info("Purging GPU memory, %lu pages freed, "
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0aeca42cb061..25f6a2c3139e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1140,13 +1140,17 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		struct list_head *list;
 
 		if (i915_gem_object_is_shrinkable(obj)) {
-			spin_lock(&i915->mm.obj_lock);
+			unsigned long flags;
+
+			spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
 			if (obj->mm.madv != I915_MADV_WILLNEED)
 				list = &i915->mm.purge_list;
 			else
 				list = &i915->mm.shrink_list;
 			list_move_tail(&obj->mm.link, list);
-			spin_unlock(&i915->mm.obj_lock);
+
+			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a3cb08f602f9..5c075cd6f9fc 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -80,10 +80,13 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
 static void obj_bump_mru(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&i915->mm.obj_lock, flags);
 
-	spin_lock(&i915->mm.obj_lock);
 	list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-	spin_unlock(&i915->mm.obj_lock);
+
+	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
 	obj->mm.dirty = true; /* be paranoid */
 }
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx