Quoting Matthew Auld (2019-06-27 21:56:00)
> Some objects may need to be allocated as a continuous block, thinking
> ahead the various kernel io_mapping interfaces seem to expect it.
>
> Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
> Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
> Cc: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>
> ---
>  .../gpu/drm/i915/gem/i915_gem_object_types.h  |   4 +
>  drivers/gpu/drm/i915/intel_memory_region.c    |   7 +-
>  .../drm/i915/selftests/intel_memory_region.c  | 152 +++++++++++++++++-
>  drivers/gpu/drm/i915/selftests/mock_region.c  |   3 +
>  4 files changed, 160 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> index 87000fc24ab3..1c4b99e507c3 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> @@ -133,6 +133,10 @@ struct drm_i915_gem_object {
>          struct list_head batch_pool_link;
>          I915_SELFTEST_DECLARE(struct list_head st_link);
>
> +        unsigned long flags;
> +#define I915_BO_ALLOC_CONTIGUOUS (1<<0)

Nit: use BIT(0) here.

> +#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
> +
>          /*
>           * Is the object to be mapped as read-only to the GPU
>           * Only honoured if hardware has relevant pte bit
> diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
> index 721b47e46492..9b6a32bfa20d 100644
> --- a/drivers/gpu/drm/i915/intel_memory_region.c
> +++ b/drivers/gpu/drm/i915/intel_memory_region.c
> @@ -90,6 +90,7 @@ i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
>  {
>          struct intel_memory_region *mem = obj->memory_region;
>          resource_size_t size = obj->base.size;
> +        unsigned int flags = obj->flags;

This was declared as unsigned long in the struct above; please keep the
two types consistent.
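To spell out why the type matters: obj->flags is unsigned long in the
struct, so copying it through an unsigned int local silently drops any
flag above bit 31 on a 64-bit build. A minimal userspace sketch
(illustrative only, not kernel code; the high flag bit is hypothetical):

        #include <stdio.h>

        int main(void)
        {
                unsigned long flags = 1UL << 32; /* hypothetical future flag */
                unsigned int local = flags;      /* truncated to low 32 bits */

                /* prints: flags=0x100000000 local=0 */
                printf("flags=%#lx local=%#x\n", flags, local);
                return 0;
        }

Harmless while only bit 0 is defined, but cheap to keep in sync now.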
>          struct sg_table *st;
>          struct scatterlist *sg;
>          unsigned int sg_page_sizes;
> @@ -130,7 +131,7 @@ i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
>                  if (!IS_ERR(block))
>                          break;
>
> -                if (!order--) {
> +                if (flags & I915_BO_ALLOC_CONTIGUOUS || !order--) {
>                          resource_size_t target;
>                          int err;
>
> @@ -219,6 +220,9 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
>          if (!mem)
>                  return ERR_PTR(-ENODEV);
>
> +        if (flags & ~I915_BO_ALLOC_FLAGS)
> +                return ERR_PTR(-EINVAL);
> +
>          size = round_up(size, mem->min_page_size);
>
>          GEM_BUG_ON(!size);
> @@ -236,6 +240,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
>
>          INIT_LIST_HEAD(&obj->blocks);
>          obj->memory_region = mem;
> +        obj->flags = flags;
>
>          mutex_lock(&mem->obj_lock);
>          list_add(&obj->region_link, &mem->objects);
> diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> index ece499869747..c9de8b5039e4 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
> @@ -78,17 +78,17 @@ static int igt_mock_fill(void *arg)
>
>  static void igt_mark_evictable(struct drm_i915_gem_object *obj)
>  {
> -        i915_gem_object_unpin_pages(obj);
> +        if (i915_gem_object_has_pinned_pages(obj))
> +                i915_gem_object_unpin_pages(obj);
>          obj->mm.madv = I915_MADV_DONTNEED;
>          list_move(&obj->region_link, &obj->memory_region->purgeable);
>  }
>
> -static int igt_mock_evict(void *arg)
> +static int igt_frag_region(struct intel_memory_region *mem,
> +                           struct list_head *objects)
>  {
> -        struct intel_memory_region *mem = arg;
>          struct drm_i915_gem_object *obj;
>          unsigned long n_objects;
> -        LIST_HEAD(objects);
>          resource_size_t target;
>          resource_size_t total;
>          int err = 0;
> @@ -104,7 +104,7 @@ static int igt_mock_evict(void *arg)
>                          goto err_close_objects;
>                  }
>
> -                list_add(&obj->st_link, &objects);
> +                list_add(&obj->st_link, objects);
>
>                  err = i915_gem_object_pin_pages(obj);
>                  if (err)
> @@ -118,6 +118,39 @@ static int igt_mock_evict(void *arg)
>                  igt_mark_evictable(obj);
>          }
>
> +        return 0;
> +
> +err_close_objects:
> +        close_objects(objects);
> +        return err;
> +}
> +
> +static void igt_defrag_region(struct list_head *objects)
> +{
> +        struct drm_i915_gem_object *obj;
> +
> +        list_for_each_entry(obj, objects, st_link) {
> +                if (obj->mm.madv == I915_MADV_WILLNEED)
> +                        igt_mark_evictable(obj);
> +        }
> +}
> +
> +static int igt_mock_evict(void *arg)
> +{
> +        struct intel_memory_region *mem = arg;
> +        struct drm_i915_gem_object *obj;
> +        LIST_HEAD(objects);
> +        resource_size_t target;
> +        resource_size_t total;
> +        int err;
> +
> +        err = igt_frag_region(mem, &objects);
> +        if (err)
> +                return err;
> +
> +        total = resource_size(&mem->region);
> +        target = mem->mm.min_size;
> +
>          while (target <= total / 2) {
>                  obj = i915_gem_object_create_region(mem, target, 0);
>                  if (IS_ERR(obj)) {
> @@ -148,11 +181,120 @@ static int igt_mock_evict(void *arg)
>          return err;
>  }
>
> +static int igt_mock_continuous(void *arg)
> +{
> +        struct intel_memory_region *mem = arg;
> +        struct drm_i915_gem_object *obj;
> +        LIST_HEAD(objects);
> +        resource_size_t target;
> +        resource_size_t total;
> +        int err;
> +
> +        err = igt_frag_region(mem, &objects);
> +        if (err)
> +                return err;
> +
> +        total = resource_size(&mem->region);
> +        target = total / 2;
> +
> +        /*
> +         * Sanity check that we can allocate all of the available fragmented
> +         * space.
> + */ > + obj = i915_gem_object_create_region(mem, target, 0); > + if (IS_ERR(obj)) { > + err = PTR_ERR(obj); > + goto err_close_objects; > + } > + > + list_add(&obj->st_link, &objects); > + > + err = i915_gem_object_pin_pages(obj); > + if (err) { > + pr_err("failed to allocate available space\n"); > + goto err_close_objects; > + } > + > + igt_mark_evictable(obj); > + > + /* Try the smallest possible size -- should succeed */ > + obj = i915_gem_object_create_region(mem, mem->mm.min_size, > + I915_BO_ALLOC_CONTIGUOUS); > + if (IS_ERR(obj)) { > + err = PTR_ERR(obj); > + goto err_close_objects; > + } > + > + list_add(&obj->st_link, &objects); > + > + err = i915_gem_object_pin_pages(obj); > + if (err) { > + pr_err("failed to allocate smallest possible size\n"); > + goto err_close_objects; > + } > + > + igt_mark_evictable(obj); > + > + if (obj->mm.pages->nents != 1) { > + pr_err("[1]object spans multiple sg entries\n"); > + err = -EINVAL; > + goto err_close_objects; > + } > + > + /* > + * Even though there is enough free space for the allocation, we > + * shouldn't be able to allocate it, given that it is fragmented, and > + * non-continuous. > + */ > + obj = i915_gem_object_create_region(mem, target, I915_BO_ALLOC_CONTIGUOUS); > + if (IS_ERR(obj)) { > + err = PTR_ERR(obj); > + goto err_close_objects; > + } > + > + list_add(&obj->st_link, &objects); > + > + err = i915_gem_object_pin_pages(obj); > + if (!err) { > + pr_err("expected allocation to fail\n"); > + err = -EINVAL; > + goto err_close_objects; > + } > + > + igt_defrag_region(&objects); > + > + /* Should now succeed */ > + obj = i915_gem_object_create_region(mem, target, I915_BO_ALLOC_CONTIGUOUS); > + if (IS_ERR(obj)) { > + err = PTR_ERR(obj); > + goto err_close_objects; > + } > + > + list_add(&obj->st_link, &objects); > + > + err = i915_gem_object_pin_pages(obj); > + if (err) { > + pr_err("failed to allocate from defraged area\n"); > + goto err_close_objects; > + } > + > + if (obj->mm.pages->nents != 1) { > + pr_err("object spans multiple sg entries\n"); > + err = -EINVAL; > + } > + > +err_close_objects: > + close_objects(&objects); > + > + return err; > +} > + > int intel_memory_region_mock_selftests(void) > { > static const struct i915_subtest tests[] = { > SUBTEST(igt_mock_fill), > SUBTEST(igt_mock_evict), > + SUBTEST(igt_mock_continuous), > }; > struct intel_memory_region *mem; > struct drm_i915_private *i915; > diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c > index 80eafdc54927..9eeda8f45f38 100644 > --- a/drivers/gpu/drm/i915/selftests/mock_region.c > +++ b/drivers/gpu/drm/i915/selftests/mock_region.c > @@ -20,6 +20,9 @@ mock_object_create(struct intel_memory_region *mem, > struct drm_i915_gem_object *obj; > unsigned int cache_level; > > + if (flags & I915_BO_ALLOC_CONTIGUOUS) > + size = roundup_pow_of_two(size); > + > if (size > BIT(mem->mm.max_order) * mem->mm.min_size) > return ERR_PTR(-E2BIG); > > -- > 2.20.1 > > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@xxxxxxxxxxxxxxxxxxxxx > https://lists.freedesktop.org/mailman/listinfo/intel-gfx _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx