Some objects may need to be allocated as a contiguous block; thinking
ahead, the various kernel io_mapping interfaces seem to expect it.

Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
Cc: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_object.h        |   2 +
 drivers/gpu/drm/i915/intel_memory_region.c    |   7 +-
 .../drm/i915/selftests/intel_memory_region.c  | 152 +++++++++++++++++-
 drivers/gpu/drm/i915/selftests/mock_region.c  |   3 +
 4 files changed, 158 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 76947a6f49f1..efc23fc2b1df 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -163,6 +163,8 @@ struct drm_i915_gem_object {
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0
+#define I915_BO_ALLOC_CONTIGUOUS (1<<1)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
 
	/*
	 * Is the object to be mapped as read-only to the GPU
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index f7fdc3e942e6..ed37599c7784 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -59,6 +59,7 @@ i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
 {
 	struct intel_memory_region *mem = obj->memory_region;
 	resource_size_t size = obj->base.size;
+	unsigned int flags = obj->flags;
 	struct sg_table *st;
 	struct scatterlist *sg;
 	unsigned int sg_page_sizes;
@@ -99,7 +100,7 @@ i915_memory_region_get_pages_buddy(struct drm_i915_gem_object *obj)
 		if (!IS_ERR(block))
 			break;
 
-		if (!order--) {
+		if (flags & I915_BO_ALLOC_CONTIGUOUS || !order--) {
 			resource_size_t target;
 			int err;
 
@@ -185,6 +186,9 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
 	if (!mem)
 		return ERR_PTR(-ENODEV);
 
+	if (flags & ~I915_BO_ALLOC_FLAGS)
+		return ERR_PTR(-EINVAL);
+
 	size = round_up(size, mem->min_page_size);
 	GEM_BUG_ON(!size);
 
@@ -202,6 +206,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
 
 	INIT_LIST_HEAD(&obj->blocks);
 	obj->memory_region = mem;
+	obj->flags = flags;
 
 	mutex_lock(&mem->obj_lock);
 	list_add(&obj->region_link, &mem->objects);
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 1cea381d2d5e..fbb1497d0f82 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -95,17 +95,17 @@ static int igt_mock_fill(void *arg)
 
 static void igt_mark_evictable(struct drm_i915_gem_object *obj)
 {
-	i915_gem_object_unpin_pages(obj);
+	if (i915_gem_object_has_pinned_pages(obj))
+		i915_gem_object_unpin_pages(obj);
 	obj->mm.madv = I915_MADV_DONTNEED;
 	list_move(&obj->region_link, &obj->memory_region->purgeable);
 }
 
-static int igt_mock_shrink(void *arg)
+static int igt_frag_region(struct intel_memory_region *mem,
+			   struct list_head *objects)
 {
-	struct intel_memory_region *mem = arg;
 	struct drm_i915_gem_object *obj;
 	unsigned long n_objects;
-	LIST_HEAD(objects);
 	resource_size_t target;
 	resource_size_t total;
 	int err = 0;
@@ -123,7 +123,7 @@ static int igt_mock_shrink(void *arg)
 			goto err_close_objects;
 		}
 
-		list_add(&obj->st_link, &objects);
+		list_add(&obj->st_link, objects);
 
 		err = i915_gem_object_pin_pages(obj);
 		if (err)
@@ -137,6 +137,39 @@ static int igt_mock_shrink(void *arg)
 		igt_mark_evictable(obj);
 	}
 
+	return 0;
+
+err_close_objects:
+	close_objects(objects);
+	return err;
+}
+
+static void igt_defrag_region(struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, objects, st_link) {
+		if (obj->mm.madv == I915_MADV_WILLNEED)
+			igt_mark_evictable(obj);
+	}
+}
+
+static int igt_mock_shrink(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	struct drm_i915_gem_object *obj;
+	LIST_HEAD(objects);
+	resource_size_t target;
+	resource_size_t total;
+	int err;
+
+	err = igt_frag_region(mem, &objects);
+	if (err)
+		return err;
+
+	total = resource_size(&mem->region);
+	target = mem->mm.min_size;
+
 	while (target <= total / 2) {
 		obj = i915_gem_object_create_region(mem, target, 0);
 		if (IS_ERR(obj)) {
@@ -168,11 +201,120 @@ static int igt_mock_shrink(void *arg)
 	return err;
 }
 
+static int igt_mock_continuous(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	struct drm_i915_gem_object *obj;
+	LIST_HEAD(objects);
+	resource_size_t target;
+	resource_size_t total;
+	int err;
+
+	err = igt_frag_region(mem, &objects);
+	if (err)
+		return err;
+
+	total = resource_size(&mem->region);
+	target = total / 2;
+
+	/*
+	 * Sanity check that we can allocate all of the available fragmented
+	 * space.
+	 */
+	obj = i915_gem_object_create_region(mem, target, 0);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("failed to allocate available space\n");
+		goto err_close_objects;
+	}
+
+	igt_mark_evictable(obj);
+
+	/* Try the smallest possible size -- should succeed */
+	obj = i915_gem_object_create_region(mem, mem->mm.min_size,
+					    I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("failed to allocate smallest possible size\n");
+		goto err_close_objects;
+	}
+
+	igt_mark_evictable(obj);
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("[1]object spans multiple sg entries\n");
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	/*
+	 * Even though there is enough free space for the allocation, we
+	 * shouldn't be able to allocate it, given that it is fragmented, and
+	 * non-contiguous.
+	 */
+	obj = i915_gem_object_create_region(mem, target, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (!err) {
+		pr_err("expected allocation to fail\n");
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	igt_defrag_region(&objects);
+
+	/* Should now succeed */
+	obj = i915_gem_object_create_region(mem, target, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	list_add(&obj->st_link, &objects);
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		pr_err("failed to allocate from defragged area\n");
+		goto err_close_objects;
+	}
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("object spans multiple sg entries\n");
+		err = -EINVAL;
+	}
+
+err_close_objects:
+	close_objects(&objects);
+
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_mock_fill),
 		SUBTEST(igt_mock_shrink),
+		SUBTEST(igt_mock_continuous),
 	};
 	struct intel_memory_region *mem;
 	struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 11e9f379aaca..c1c09d0e4722 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -38,6 +38,9 @@ mock_object_create(struct intel_memory_region *mem,
 	struct drm_i915_private *i915 = mem->i915;
 	struct drm_i915_gem_object *obj;
 
+	if (flags & I915_BO_ALLOC_CONTIGUOUS)
+		size = roundup_pow_of_two(size);
+
 	if (size > BIT(mem->mm.max_order) * mem->mm.min_size)
 		return ERR_PTR(-E2BIG);
 
-- 
2.20.1
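
For illustration only, and not part of the patch itself: a rough sketch of
how a caller might request a physically contiguous object with the new
flag, using only the interfaces exercised by the selftests above
(i915_gem_object_create_region(), I915_BO_ALLOC_CONTIGUOUS,
i915_gem_object_pin_pages()). The helper name is made up.

/* Hypothetical usage sketch -- not part of this series. */
static struct drm_i915_gem_object *
example_create_contiguous(struct intel_memory_region *mem,
			  resource_size_t size)
{
	struct drm_i915_gem_object *obj;
	int err;

	/* Ask the region for a single physically contiguous block. */
	obj = i915_gem_object_create_region(mem, size,
					    I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	/*
	 * With I915_BO_ALLOC_CONTIGUOUS the backing store is a single span
	 * (one sg entry), which is what e.g. an io_mapping covering the
	 * object's range would expect.
	 */
	GEM_BUG_ON(obj->mm.pages->nents != 1);

	return obj;
}

The caller would unpin the pages and put the object as usual when done.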