On Thu, Jun 27, 2019 at 09:55:59PM +0100, Matthew Auld wrote:
> Support basic eviction for regions.
>
> Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
> Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
> Cc: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>

So from a very high level this looks like it was largely modelled after
i915_gem_shrink.c and not i915_gem_evict.c (our other "make room, we're
running out of stuff" code). Any specific reasons?

I think i915_gem_evict is a much closer match for what we want for vram
(it started out to manage the severely limited GTT on gen2/3/4) after
all. The complication is that we'll have to manage physical memory with
multiple virtual mappings on top of it, so unfortunately we can't just
reuse the locking pattern Chris has come up with in his
struct_mutex-removal branch. But at least conceptually it should be a
lot closer.
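To make that a bit more concrete, here is a very rough sketch of the
shape I have in mind, loosely modelled on i915_gem_evict_something().
This is purely illustrative and not even compile-tested; the helpers
region_object_is_evictable() and region_evict_list() do not exist, they
just stand in for "can we kick this object out" and "unbind it and drop
its pages":

static int intel_memory_region_evict_something(struct intel_memory_region *mem,
					       resource_size_t target)
{
	struct drm_i915_gem_object *obj;
	resource_size_t found = 0;
	LIST_HEAD(eviction_list);

	/*
	 * Walk all objects in the region (eventually in LRU order), not
	 * just the ones userspace has already marked DONTNEED.
	 */
	list_for_each_entry(obj, &mem->objects, region_link) {
		if (!region_object_is_evictable(obj))
			continue;

		/* Collect victims until we have enough to satisfy target. */
		list_add(&obj->eviction_link, &eviction_list);
		found += obj->base.size;
		if (found >= target)
			return region_evict_list(mem, &eviction_list);
	}

	return -ENOSPC;
}

I.e. build up an eviction list first and then evict it in one go, so
the scan stays cheap and all the nastiness of tearing down multiple
mappings lives in one place.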
But I might be entirely off the track with reconstructing how this code
came to be, so please elaborate a bit.

Thanks, Daniel

> ---
>  .../gpu/drm/i915/gem/i915_gem_object_types.h  |  7 ++
>  drivers/gpu/drm/i915/i915_gem.c               | 16 ++++
>  drivers/gpu/drm/i915/intel_memory_region.c    | 89 ++++++++++++++++++-
>  drivers/gpu/drm/i915/intel_memory_region.h    | 10 +++
>  .../drm/i915/selftests/intel_memory_region.c  | 73 +++++++++++++++
>  drivers/gpu/drm/i915/selftests/mock_region.c  |  1 +
>  6 files changed, 192 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> index 8d760e852c4b..87000fc24ab3 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> @@ -72,6 +72,13 @@ struct drm_i915_gem_object {
>  	 * List of memory region blocks allocated for this object.
>  	 */
>  	struct list_head blocks;
> +	/**
> +	 * Element within memory_region->objects or memory_region->purgeable if
> +	 * the object is marked as DONTNEED. Access is protected by
> +	 * memory_region->obj_lock.
> +	 */
> +	struct list_head region_link;
> +	struct list_head eviction_link;
>
>  	struct {
>  		/**
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index db3744b0bc80..85677ae89849 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1122,6 +1122,22 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
>  	    !i915_gem_object_has_pages(obj))
>  		i915_gem_object_truncate(obj);
>
> +	if (obj->memory_region) {
> +		mutex_lock(&obj->memory_region->obj_lock);
> +
> +		switch (obj->mm.madv) {
> +		case I915_MADV_WILLNEED:
> +			list_move(&obj->region_link, &obj->memory_region->objects);
> +			break;
> +		default:
> +			list_move(&obj->region_link,
> +				  &obj->memory_region->purgeable);
> +			break;
> +		}
> +
> +		mutex_unlock(&obj->memory_region->obj_lock);
> +	}
> +
>  	args->retained = obj->mm.madv != __I915_MADV_PURGED;
>  	mutex_unlock(&obj->mm.lock);
>
> diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
> index 4c89853a7769..721b47e46492 100644
> --- a/drivers/gpu/drm/i915/intel_memory_region.c
> +++ b/drivers/gpu/drm/i915/intel_memory_region.c
> @@ -6,6 +6,56 @@
>  #include "intel_memory_region.h"
>  #include "i915_drv.h"
>
> +int i915_memory_region_evict(struct intel_memory_region *mem,
> +			     resource_size_t target)
> +{
> +	struct drm_i915_gem_object *obj, *on;
> +	resource_size_t found;
> +	LIST_HEAD(purgeable);
> +	int err;
> +
> +	err = 0;
> +	found = 0;
> +
> +	mutex_lock(&mem->obj_lock);
> +
> +	list_for_each_entry(obj, &mem->purgeable, region_link) {
> +		if (!i915_gem_object_has_pages(obj))
> +			continue;
> +
> +		if (READ_ONCE(obj->pin_global))
> +			continue;
> +
> +		if (atomic_read(&obj->bind_count))
> +			continue;
> +
> +		list_add(&obj->eviction_link, &purgeable);
> +
> +		found += obj->base.size;
> +		if (found >= target)
> +			goto found;
> +	}
> +
> +	err = -ENOSPC;
> +found:
> +	list_for_each_entry_safe(obj, on, &purgeable, eviction_link) {
> +		if (!err) {
> +			__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
> +
> +			mutex_lock_nested(&obj->mm.lock, I915_MM_SHRINKER);
> +			if (!i915_gem_object_has_pages(obj))
> +				obj->mm.madv = __I915_MADV_PURGED;
> +			mutex_unlock(&obj->mm.lock);
> +		}
> +
> +		list_del(&obj->eviction_link);
> +	}
> +
> +	mutex_unlock(&mem->obj_lock);
> +
> +	return err;
> +}
> +
>  static void
>  memory_region_free_pages(struct drm_i915_gem_object *obj,
>  			 struct sg_table *pages)
> @@ -70,7 +120,8 @@ i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
>  	unsigned int order;
>  	u64 block_size;
>  	u64 offset;
> -
> +	bool retry = true;
> +retry:
>  	order = fls(n_pages) - 1;
>  	GEM_BUG_ON(order > mem->mm.max_order);
>
> @@ -79,9 +130,24 @@ i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
>  		if (!IS_ERR(block))
>  			break;
>
> -		/* XXX: some kind of eviction pass, local to the device */
> -		if (!order--)
> -			goto err_free_blocks;
> +		if (!order--) {
> +			resource_size_t target;
> +			int err;
> +
> +			if (!retry)
> +				goto err_free_blocks;
> +
> +			target = n_pages * mem->mm.min_size;
> +
> +			mutex_unlock(&mem->mm_lock);
> +			err = i915_memory_region_evict(mem, target);
> +			mutex_lock(&mem->mm_lock);
> +			if (err)
> +				goto err_free_blocks;
> +
> +			retry = false;
> +			goto retry;
> +		}
>  	} while (1);
>
>  	n_pages -= BIT(order);
> @@ -136,6 +202,13 @@ void i915_memory_region_release_buddy(struct intel_memory_region *mem)
>  	i915_buddy_fini(&mem->mm);
>  }
>
> +void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
> +{
> +	mutex_lock(&obj->memory_region->obj_lock);
> +	list_del(&obj->region_link);
> +	mutex_unlock(&obj->memory_region->obj_lock);
> +}
> +
>  struct drm_i915_gem_object *
>  i915_gem_object_create_region(struct intel_memory_region *mem,
>  			      resource_size_t size,
> @@ -164,6 +237,10 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
>  	INIT_LIST_HEAD(&obj->blocks);
>  	obj->memory_region = mem;
>
> +	mutex_lock(&mem->obj_lock);
> +	list_add(&obj->region_link, &mem->objects);
> +	mutex_unlock(&mem->obj_lock);
> +
>  	return obj;
>  }
>
> @@ -188,6 +265,10 @@ intel_memory_region_create(struct drm_i915_private *i915,
>  	mem->min_page_size = min_page_size;
>  	mem->ops = ops;
>
> +	mutex_init(&mem->obj_lock);
> +	INIT_LIST_HEAD(&mem->objects);
> +	INIT_LIST_HEAD(&mem->purgeable);
> +
>  	mutex_init(&mem->mm_lock);
>
>  	if (ops->init) {
> diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
> index 8d4736bdde50..bee0c022d295 100644
> --- a/drivers/gpu/drm/i915/intel_memory_region.h
> +++ b/drivers/gpu/drm/i915/intel_memory_region.h
> @@ -80,8 +80,16 @@ struct intel_memory_region {
>  	unsigned int type;
>  	unsigned int instance;
>  	unsigned int id;
> +
> +	/* Protects access to objects and purgeable */
> +	struct mutex obj_lock;
> +	struct list_head objects;
> +	struct list_head purgeable;
>  };
>
> +int i915_memory_region_evict(struct intel_memory_region *mem,
> +			     resource_size_t target);
> +
>  int i915_memory_region_init_buddy(struct intel_memory_region *mem);
>  void i915_memory_region_release_buddy(struct intel_memory_region *mem);
>
> @@ -89,6 +97,8 @@ int i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj);
>  void i915_memory_region_put_pages_buddy(struct drm_i915_gem_object *obj,
>  					struct sg_table *pages);
>
> +void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
> +
>  struct intel_memory_region *
>  intel_memory_region_create(struct drm_i915_private *i915,
>  			    resource_size_t start,
> diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> index c3b160cfd713..ece499869747 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> @@ -76,10 +76,83 @@ static int igt_mock_fill(void *arg)
>  	return err;
>  }
>
> +static void igt_mark_evictable(struct drm_i915_gem_object *obj)
> +{
> +	i915_gem_object_unpin_pages(obj);
> +	obj->mm.madv = I915_MADV_DONTNEED;
> +	list_move(&obj->region_link, &obj->memory_region->purgeable);
> +}
> +
> +static int igt_mock_evict(void *arg)
> +{
> +	struct intel_memory_region *mem = arg;
> +	struct drm_i915_gem_object *obj;
> +	unsigned long n_objects;
> +	LIST_HEAD(objects);
> +	resource_size_t target;
> +	resource_size_t total;
> +	int err = 0;
> +
> +	target = mem->mm.min_size;
> +	total = resource_size(&mem->region);
> +	n_objects = total / target;
> +
> +	while (n_objects--) {
> +		obj = i915_gem_object_create_region(mem, target, 0);
> +		if (IS_ERR(obj)) {
> +			err = PTR_ERR(obj);
> +			goto err_close_objects;
> +		}
> +
> +		list_add(&obj->st_link, &objects);
> +
> +		err = i915_gem_object_pin_pages(obj);
> +		if (err)
> +			goto err_close_objects;
> +
> +		/*
> +		 * Make half of the region evictable, though do so in a
> +		 * horribly fragmented fashion.
> +		 */
> +		if (n_objects % 2)
> +			igt_mark_evictable(obj);
> +	}
> +
> +	while (target <= total / 2) {
> +		obj = i915_gem_object_create_region(mem, target, 0);
> +		if (IS_ERR(obj)) {
> +			err = PTR_ERR(obj);
> +			goto err_close_objects;
> +		}
> +
> +		list_add(&obj->st_link, &objects);
> +
> +		err = i915_gem_object_pin_pages(obj);
> +		if (err) {
> +			pr_err("failed to evict for target=%pa", &target);
> +			goto err_close_objects;
> +		}
> +
> +		/* Again, half of the region should remain evictable */
> +		igt_mark_evictable(obj);
> +
> +		target <<= 1;
> +	}
> +
> +err_close_objects:
> +	close_objects(&objects);
> +
> +	if (err == -ENOMEM)
> +		err = 0;
> +
> +	return err;
> +}
> +
>  int intel_memory_region_mock_selftests(void)
>  {
>  	static const struct i915_subtest tests[] = {
>  		SUBTEST(igt_mock_fill),
> +		SUBTEST(igt_mock_evict),
>  	};
>  	struct intel_memory_region *mem;
>  	struct drm_i915_private *i915;
> diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
> index cb942a461e9d..80eafdc54927 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_region.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_region.c
> @@ -8,6 +8,7 @@
>  static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
>  	.get_pages = i915_memory_region_get_pages_buddy,
>  	.put_pages = i915_memory_region_put_pages_buddy,
> +	.release = i915_gem_object_release_memory_region,
>  };
>
>  static struct drm_i915_gem_object *
> --
> 2.20.1

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch