On Thu, 14 Feb 2019 at 15:25, Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> wrote:
>
> Quoting Matthew Auld (2019-02-14 14:57:03)
> > Support basic eviction for regions.
> >
> > Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
> > Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
> > Cc: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>
> > ---
> >  drivers/gpu/drm/i915/i915_drv.h               |  2 +
> >  drivers/gpu/drm/i915/i915_gem.c               | 16 ++++
> >  drivers/gpu/drm/i915/i915_gem_object.h        |  7 ++
> >  drivers/gpu/drm/i915/i915_gem_shrinker.c      | 59 ++++++++++++++
> >  drivers/gpu/drm/i915/intel_memory_region.c    | 40 +++++++++-
> >  drivers/gpu/drm/i915/intel_memory_region.h    |  7 ++
> >  .../drm/i915/selftests/intel_memory_region.c  | 76 +++++++++++++++++++
> >  drivers/gpu/drm/i915/selftests/mock_region.c  |  1 +
> >  8 files changed, 204 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > index 0bea7d889284..3df27769b978 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -3196,6 +3196,8 @@ void i915_gem_shrinker_register(struct drm_i915_private *i915);
> >  void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
> >  void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
> >  				    struct mutex *mutex);
> > +int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
> > +				  resource_size_t target);
> >
> >  /* i915_gem_tiling.c */
> >  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index 92768ab294a4..7f044b643a75 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -4095,6 +4095,22 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
> >  	    !i915_gem_object_has_pages(obj))
> >  		i915_gem_object_truncate(obj);
> >
> > +	if (obj->memory_region) {
> > +		mutex_lock(&obj->memory_region->obj_lock);
> > +
> > +		switch (obj->mm.madv) {
> > +		case I915_MADV_WILLNEED:
> > +			list_move(&obj->region_link, &obj->memory_region->objects);
> > +			break;
> > +		default:
> > +			list_move(&obj->region_link,
> > +				  &obj->memory_region->purgeable);
> > +			break;
> > +		}
> > +
> > +		mutex_unlock(&obj->memory_region->obj_lock);
> > +	}
> > +
> >  	args->retained = obj->mm.madv != __I915_MADV_PURGED;
> >  	mutex_unlock(&obj->mm.lock);
> >
> > diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
> > index ac52f61e8ad1..76947a6f49f1 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_object.h
> > +++ b/drivers/gpu/drm/i915/i915_gem_object.h
> > @@ -95,6 +95,13 @@ struct drm_i915_gem_object {
> >  	 * List of memory region blocks allocated for this object.
> >  	 */
> >  	struct list_head blocks;
> > +	/**
> > +	 * Element within memory_region->objects or memory_region->purgeable if
> > +	 * the object is marked as DONTNEED. Access is protected by
> > +	 * memory_region->obj_lock.
>
> Lies.
;-p

>
> > +	 */
> > +	struct list_head region_link;
> > +	struct list_head tmp_link;
> >
> >  	struct {
> >  		/**
> > diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> > index 6da795c7e62e..713c6c93cf30 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> > @@ -308,6 +308,65 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
> >  	return freed;
> >  }
> >
> > +int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
> > +				  resource_size_t target)
>
> If it's not going to be coupled into the mm.shrinker callback, do not put
> it here! And there's no reason why we would ever couple local memory to
> the generic mm shrinker!

Yup.

> > +{
> > +	struct drm_i915_private *i915 = mem->i915;
> > +	struct drm_i915_gem_object *obj, *on;
> > +	resource_size_t found;
> > +	LIST_HEAD(purgeable);
> > +	bool unlock;
> > +	int err;
> > +
> > +	if (!shrinker_lock(i915, 0, &unlock))
> > +		return 0;
>
> Don't...
>
> > +
> > +	i915_retire_requests(i915);
>
> And this, don't do this.
>
> > +	err = 0;
> > +	found = 0;
> > +
> > +	mutex_lock(&mem->obj_lock);
>
> That's all the top-level locking we should ever need.
>
> > +	list_for_each_entry(obj, &mem->purgeable, region_link) {
> > +		if (!i915_gem_object_has_pages(obj))
> > +			continue;
> > +
> > +		if (READ_ONCE(obj->pin_global))
> > +			continue;
> > +
> > +		if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
> > +			continue;
> > +
> > +		list_add(&obj->tmp_link, &purgeable);
>
> Oh crikey.

What's the crikey for? We do the purging in two passes? Yeah, I guess
that's kinda garbage. There is definitely some leftover baggage from when
we did "interesting" things in here, which needs to be fixed up.

>
> > +
> > +		found += obj->base.size;
> > +		if (found >= target)
> > +			goto found;
> > +	}
> > +
> > +	err = -ENOSPC;
> > +found:
> > +	mutex_unlock(&mem->obj_lock);
> > +
> > +	list_for_each_entry_safe(obj, on, &purgeable, tmp_link) {
> > +		if (!err)
> > +			err = i915_gem_object_unbind(obj);
>
> I would advise not to worry about unbinding until you have it decoupled
> from struct_mutex. Or at least defer struct_mutex until you truly need
> it to access the vma, so that it doesn't get abused for anything else.

Ok, I'll just drop the unbind, and kiss for now.

>
> > +		if (!err) {
> > +			__i915_gem_object_put_pages(obj,
> > +						    I915_MM_SHRINKER);
> > +			if (!i915_gem_object_has_pages(obj))
> > +				obj->mm.madv = __I915_MADV_PURGED;
>
> But this is racy.
>
> > +		}
> > +		list_del(&obj->tmp_link);
>
> I'm still going crikey. That's no excuse to invoke struct_mutex.
> -Chris
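
For reference, a rough, untested sketch of the simplified single-pass flow the
comments above seem to be asking for: only mem->obj_lock at the top level, no
shrinker_lock/struct_mutex, no request retirement, no unbind and no second pass
over a tmp list, living next to the region code rather than in
i915_gem_shrinker.c. The name intel_memory_region_evict() is only a
placeholder, and the sketch assumes that obj->mm.lock may nest under
mem->obj_lock and that releasing the pages does not take the object off
mem->purgeable. Marking objects __I915_MADV_PURGED is left out, since
presumably that needs to be done under obj->mm.lock to avoid the race noted
above.

static int intel_memory_region_evict(struct intel_memory_region *mem,
				     resource_size_t target)
{
	struct drm_i915_gem_object *obj;
	resource_size_t found = 0;
	int err = -ENOSPC;

	/* The region's obj_lock is the only top-level lock taken here. */
	mutex_lock(&mem->obj_lock);

	list_for_each_entry(obj, &mem->purgeable, region_link) {
		/* Nothing to release if the backing store is already gone. */
		if (!i915_gem_object_has_pages(obj))
			continue;

		if (READ_ONCE(obj->pin_global))
			continue;

		/*
		 * With the unbind dropped, anything still bound keeps its
		 * pages pinned, so only completely idle objects qualify.
		 */
		if (atomic_read(&obj->mm.pages_pin_count))
			continue;

		/*
		 * Release the pages directly; no struct_mutex and no second
		 * pass. Setting __I915_MADV_PURGED is deferred to a path
		 * that holds obj->mm.lock.
		 */
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);

		found += obj->base.size;
		if (found >= target) {
			err = 0;
			break;
		}
	}

	mutex_unlock(&mem->obj_lock);

	return err;
}

The -ENOSPC on falling short of the target mirrors the original helper; a
best-effort caller could instead be handed back the amount actually freed.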