If we purge the object from the shrinker, it is no longer accessible
and so we can reclaim it from the per-context lookup caches. For
example, if the client is leaking objects (but leaving them marked as
I915_MADV_DONTNEED) then we still end up with significant memory
pressure from the unreclaimed radix trees and slabs.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index fb5c10d25bf3..2a272045cf4d 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -116,6 +116,26 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 	return !i915_gem_object_has_pages(obj);
 }
 
+static void close_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_lut_handle *lut, *ln;
+
+	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
+		struct i915_vma *vma;
+
+		vma = radix_tree_delete(&lut->ctx->handles_vma, lut->handle);
+		if (!--vma->open_count && !i915_vma_is_ggtt(vma))
+			i915_vma_close(vma);
+
+		list_del(&lut->ctx_link);
+
+		kmem_cache_free(i915->luts, lut);
+		i915_gem_object_put(obj);
+	}
+	INIT_LIST_HEAD(&obj->lut_list);
+}
+
 static void __start_writeback(struct drm_i915_gem_object *obj,
 			      unsigned int flags)
 {
@@ -136,6 +156,7 @@ static void __start_writeback(struct drm_i915_gem_object *obj,
 	case I915_MADV_DONTNEED:
 		__i915_gem_object_truncate(obj);
 	case __I915_MADV_PURGED:
+		close_object(obj);
 		return;
 	}
 
-- 
2.14.1