>-----Original Message-----
>From: Intel-gfx [mailto:intel-gfx-bounces@xxxxxxxxxxxxxxxxxxxxx] On Behalf Of
>Matthew Auld
>Sent: Friday, September 27, 2019 1:34 PM
>To: intel-gfx@xxxxxxxxxxxxxxxxxxxxx
>Cc: daniel.vetter@xxxxxxxx
>Subject: [PATCH 04/22] drm/i915/region: support continuous
>allocations
>
>Some kernel internal objects may need to be allocated as a continuous

Nit: You refer to the "continuous" block, but then you create the
"CONTIGUOUS" allocations. s/continuous/contiguous?

Mike

>block, also thinking ahead the various kernel io_mapping interfaces seem
>to expect it, although this is purely a limitation in the kernel
>API...so perhaps something to be improved.
>
>Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
>Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
>Cc: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>
>---
> .../gpu/drm/i915/gem/i915_gem_object_types.h | 4 +
> drivers/gpu/drm/i915/gem/i915_gem_region.c | 15 +-
> drivers/gpu/drm/i915/gem/i915_gem_region.h | 3 +-
> .../gpu/drm/i915/gem/selftests/huge_pages.c | 3 +-
> drivers/gpu/drm/i915/intel_memory_region.c | 13 +-
> drivers/gpu/drm/i915/intel_memory_region.h | 3 +-
> .../drm/i915/selftests/intel_memory_region.c | 163 ++++++++++++++++++
> drivers/gpu/drm/i915/selftests/mock_region.c | 2 +-
> 8 files changed, 197 insertions(+), 9 deletions(-)
>
>diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
>b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
>index d36c860c9c6f..7acd383f174f 100644
>--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
>+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
>@@ -117,6 +117,10 @@ struct drm_i915_gem_object {
>
> I915_SELFTEST_DECLARE(struct list_head st_link);
>
>+ unsigned long flags;
>+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
>+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
>+
> /*
> * Is the object to be mapped as read-only to the GPU
> * Only honoured if hardware has relevant pte bit
>diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c
>b/drivers/gpu/drm/i915/gem/i915_gem_region.c
>index 5c3bfc121921..b317a5c84144 100644
>--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
>+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
>@@ -23,10 +23,10 @@ i915_gem_object_get_pages_buddy(struct
>drm_i915_gem_object *obj)
> {
> struct intel_memory_region *mem = obj->mm.region;
> struct list_head *blocks = &obj->mm.blocks;
>- unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
> resource_size_t size = obj->base.size;
> resource_size_t prev_end;
> struct i915_buddy_block *block;
>+ unsigned int flags;
> struct sg_table *st;
> struct scatterlist *sg;
> unsigned int sg_page_sizes;
>@@ -42,6 +42,10 @@ i915_gem_object_get_pages_buddy(struct
>drm_i915_gem_object *obj)
> return -ENOMEM;
> }
>
>+ flags = I915_ALLOC_MIN_PAGE_SIZE;
>+ if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
>+ flags |= I915_ALLOC_CONTIGUOUS;
>+
> ret = __intel_memory_region_get_pages_buddy(mem, size, flags,
>blocks);
> if (ret)
> goto err_free_sg;
>@@ -56,7 +60,8 @@ i915_gem_object_get_pages_buddy(struct
>drm_i915_gem_object *obj)
> list_for_each_entry(block, blocks, link) {
> u64 block_size, offset;
>
>- block_size = i915_buddy_block_size(&mem->mm, block);
>+ block_size = min_t(u64, size,
>+ i915_buddy_block_size(&mem->mm,
>block));
> offset = i915_buddy_block_offset(block);
>
> GEM_BUG_ON(overflows_type(block_size, sg->length));
>@@ -98,10 +103,12 @@ i915_gem_object_get_pages_buddy(struct
>drm_i915_gem_object *obj)
> }
>
> void i915_gem_object_init_memory_region(struct drm_i915_gem_object
>*obj,
>- struct intel_memory_region *mem)
>+ struct intel_memory_region *mem,
>+ unsigned long flags)
> {
> INIT_LIST_HEAD(&obj->mm.blocks);
> obj->mm.region = mem;
>+ obj->flags = flags;
> }
>
> void i915_gem_object_release_memory_region(struct
>drm_i915_gem_object *obj)
>@@ -115,6 +122,8 @@ i915_gem_object_create_region(struct
>intel_memory_region *mem,
> {
> struct drm_i915_gem_object *obj;
>
>+ GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
>+
> if (!mem)
> return ERR_PTR(-ENODEV);
>
>diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h
>b/drivers/gpu/drm/i915/gem/i915_gem_region.h
>index ebddc86d78f7..f2ff6f8bff74 100644
>--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
>+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
>@@ -17,7 +17,8 @@ void i915_gem_object_put_pages_buddy(struct
>drm_i915_gem_object *obj,
> struct sg_table *pages);
>
> void i915_gem_object_init_memory_region(struct drm_i915_gem_object
>*obj,
>- struct intel_memory_region *mem);
>+ struct intel_memory_region *mem,
>+ unsigned long flags);
> void i915_gem_object_release_memory_region(struct
>drm_i915_gem_object *obj);
>
> struct drm_i915_gem_object *
>diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
>b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
>index 4e1805aaeb99..f9fbf2865782 100644
>--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
>+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
>@@ -471,7 +471,8 @@ static int igt_mock_memory_region_huge_pages(void
>*arg)
> unsigned int page_size = BIT(bit);
> resource_size_t phys;
>
>- obj = i915_gem_object_create_region(mem, page_size, 0);
>+ obj = i915_gem_object_create_region(mem, page_size,
>+
>I915_BO_ALLOC_CONTIGUOUS);
> if (IS_ERR(obj)) {
> err = PTR_ERR(obj);
> goto out_destroy_device;
>diff --git a/drivers/gpu/drm/i915/intel_memory_region.c
>b/drivers/gpu/drm/i915/intel_memory_region.c
>index e48d5c37c4df..7a66872d9eac 100644
>--- a/drivers/gpu/drm/i915/intel_memory_region.c
>+++ b/drivers/gpu/drm/i915/intel_memory_region.c
>@@ -47,8 +47,8 @@ __intel_memory_region_get_pages_buddy(struct
>intel_memory_region *mem,
> unsigned int flags,
> struct list_head *blocks)
> {
>- unsigned long n_pages = size >> ilog2(mem->mm.chunk_size);
> unsigned int min_order = 0;
>+ unsigned long n_pages;
>
> GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
> GEM_BUG_ON(!list_empty(blocks));
>@@ -58,6 +58,13 @@ __intel_memory_region_get_pages_buddy(struct
>intel_memory_region *mem,
> ilog2(mem->mm.chunk_size);
> }
>
>+ if (flags & I915_ALLOC_CONTIGUOUS) {
>+ size = roundup_pow_of_two(size);
>+ min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
>+ }
>+
>+ n_pages = size >> ilog2(mem->mm.chunk_size);
>+
> mutex_lock(&mem->mm_lock);
>
> do {
>@@ -104,7 +111,9 @@ __intel_memory_region_get_block_buddy(struct
>intel_memory_region *mem,
> int ret;
>
> INIT_LIST_HEAD(&blocks);
>- ret = __intel_memory_region_get_pages_buddy(mem, size, 0,
>&blocks);
>+ ret = __intel_memory_region_get_pages_buddy(mem, size,
>+ I915_ALLOC_CONTIGUOUS,
>+ &blocks);
> if (ret)
> return ERR_PTR(ret);
>
>diff --git a/drivers/gpu/drm/i915/intel_memory_region.h
>b/drivers/gpu/drm/i915/intel_memory_region.h
>index ae1ce298bcd1..1dad51b2fc96 100644
>--- a/drivers/gpu/drm/i915/intel_memory_region.h
>+++ b/drivers/gpu/drm/i915/intel_memory_region.h
>@@ -17,7 +17,8 @@ struct drm_i915_gem_object;
> struct intel_memory_region;
> struct sg_table;
>
>-#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
>+#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
>+#define I915_ALLOC_CONTIGUOUS BIT(1)
>
> struct intel_memory_region_ops {
> unsigned int flags;
>diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
>b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
>index 54f9a624b4e1..c43d00ec38ea 100644
>--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
>+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
>@@ -13,6 +13,7 @@
>
> #include "gem/i915_gem_region.h"
> #include "gem/selftests/mock_context.h"
>+#include "selftests/i915_random.h"
>
> static void close_objects(struct list_head *objects)
> {
>@@ -89,10 +90,172 @@ static int igt_mock_fill(void *arg)
> return err;
> }
>
>+static struct drm_i915_gem_object *
>+igt_object_create(struct intel_memory_region *mem,
>+ struct list_head *objects,
>+ u64 size,
>+ unsigned int flags)
>+{
>+ struct drm_i915_gem_object *obj;
>+ int err;
>+
>+ obj = i915_gem_object_create_region(mem, size, flags);
>+ if (IS_ERR(obj))
>+ return obj;
>+
>+ err = i915_gem_object_pin_pages(obj);
>+ if (err)
>+ goto put;
>+
>+ list_add(&obj->st_link, objects);
>+ return obj;
>+
>+put:
>+ i915_gem_object_put(obj);
>+ return ERR_PTR(err);
>+}
>+
>+void igt_object_release(struct drm_i915_gem_object *obj)
>+{
>+ i915_gem_object_unpin_pages(obj);
>+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
>+ i915_gem_object_put(obj);
>+ list_del(&obj->st_link);
>+}
>+
>+static int igt_mock_continuous(void *arg)
>+{
>+ struct intel_memory_region *mem = arg;
>+ struct drm_i915_gem_object *obj;
>+ unsigned long n_objects;
>+ LIST_HEAD(objects);
>+ LIST_HEAD(holes);
>+ I915_RND_STATE(prng);
>+ resource_size_t target;
>+ resource_size_t total;
>+ resource_size_t min;
>+ int err = 0;
>+
>+ total = resource_size(&mem->region);
>+
>+ /* Min size */
>+ obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
>+ I915_BO_ALLOC_CONTIGUOUS);
>+ if (IS_ERR(obj))
>+ return PTR_ERR(obj);
>+
>+ if (obj->mm.pages->nents != 1) {
>+ pr_err("%s min object spans multiple sg entries\n",
>__func__);
>+ err = -EINVAL;
>+ goto err_close_objects;
>+ }
>+
>+ igt_object_release(obj);
>+
>+ /* Max size */
>+ obj = igt_object_create(mem, &objects, total,
>I915_BO_ALLOC_CONTIGUOUS);
>+ if (IS_ERR(obj))
>+ return PTR_ERR(obj);
>+
>+ if (obj->mm.pages->nents != 1) {
>+ pr_err("%s max object spans multiple sg entries\n",
>__func__);
>+ err = -EINVAL;
>+ goto err_close_objects;
>+ }
>+
>+ igt_object_release(obj);
>+
>+ /* Internal fragmentation should not bleed into the object size */
>+ target = round_up(prandom_u32_state(&prng) % total, PAGE_SIZE);
>+ target = max_t(u64, PAGE_SIZE, target);
>+
>+ obj = igt_object_create(mem, &objects, target,
>+ I915_BO_ALLOC_CONTIGUOUS);
>+ if (IS_ERR(obj))
>+ return PTR_ERR(obj);
>+
>+ if (obj->base.size != target) {
>+ pr_err("%s obj->base.size(%llx) != target(%llx)\n", __func__,
>+ (u64)obj->base.size, (u64)target);
>+ err = -EINVAL;
>+ goto err_close_objects;
>+ }
>+
>+ if (obj->mm.pages->nents != 1) {
>+ pr_err("%s object spans multiple sg entries\n", __func__);
>+ err = -EINVAL;
>+ goto err_close_objects;
>+ }
>+
>+ igt_object_release(obj);
>+
>+ /*
>+ * Try to fragment the address space, such that half of it is free, but
>+ * the max contiguous block size is SZ_64K.
>+ */
>
>+ target = SZ_64K;
>+ n_objects = div64_u64(total, target);
>+
>+ while (n_objects--) {
>+ struct list_head *list;
>+
>+ if (n_objects % 2)
>+ list = &holes;
>+ else
>+ list = &objects;
>+
>+ obj = igt_object_create(mem, list, target,
>+ I915_BO_ALLOC_CONTIGUOUS);
>+ if (IS_ERR(obj)) {
>+ err = PTR_ERR(obj);
>+ goto err_close_objects;
>+ }
>+ }
>+
>+ close_objects(&holes);
>+
>+ min = target;
>+ target = total >> 1;
>+
>+ /* Make sure we can still allocate all the fragmented space */
>+ obj = igt_object_create(mem, &objects, target, 0);
>+ if (IS_ERR(obj))
>+ return PTR_ERR(obj);
>+
>+ igt_object_release(obj);
>+
>+ /*
>+ * Even though we have enough free space, we don't have a big
>enough
>+ * contiguous block. Make sure that holds true.
>+ */
>+
>+ do {
>+ bool should_fail = target > min;
>+
>+ obj = igt_object_create(mem, &objects, target,
>+ I915_BO_ALLOC_CONTIGUOUS);
>+ if (should_fail != IS_ERR(obj)) {
>+ pr_err("%s target allocation(%llx) mismatch\n",
>+ __func__, (u64)target);
>+ err = -EINVAL;
>+ goto err_close_objects;
>+ }
>+
>+ target >>= 1;
>+ } while (target >= mem->mm.chunk_size);
>+
>+err_close_objects:
>+ list_splice_tail(&holes, &objects);
>+ close_objects(&objects);
>+ return err;
>+}
>+
> int intel_memory_region_mock_selftests(void)
> {
> static const struct i915_subtest tests[] = {
> SUBTEST(igt_mock_fill),
>+ SUBTEST(igt_mock_continuous),
> };
> struct intel_memory_region *mem;
> struct drm_i915_private *i915;
>diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c
>b/drivers/gpu/drm/i915/selftests/mock_region.c
>index 0e9a575ede3b..7b0c99ddc2d5 100644
>--- a/drivers/gpu/drm/i915/selftests/mock_region.c
>+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
>@@ -36,7 +36,7 @@ mock_object_create(struct intel_memory_region
>*mem,
>
> i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
>
>- i915_gem_object_init_memory_region(obj, mem);
>+ i915_gem_object_init_memory_region(obj, mem, flags);
>
> return obj;
> }
>--
>2.20.1
>
>_______________________________________________
>Intel-gfx mailing list
>Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
>https://lists.freedesktop.org/mailman/listinfo/intel-gfx
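
For anyone following along, a worked example may help make the new
min_order handling in __intel_memory_region_get_pages_buddy() concrete.
The numbers below are purely illustrative (they assume mm.chunk_size is
4K and a caller asking for a 192K contiguous object):

	size      = 192K, I915_ALLOC_CONTIGUOUS set
	size      = roundup_pow_of_two(192K) = 256K
	min_order = ilog2(256K) - ilog2(4K) = 18 - 12 = 6

so the buddy allocator has to hand back a single order-6 (256K) block,
which is why igt_mock_continuous() can assert obj->mm.pages->nents == 1.
Back in i915_gem_object_get_pages_buddy() the min_t(u64, size,
block_size) clamp keeps the sg entry at the 192K that was actually
requested, and obj->base.size is never rounded, which is what the
"internal fragmentation should not bleed into the object size" check in
the selftest verifies.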
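
And for reference, this is roughly what a kernel-internal user would do
to get a contiguous object once this lands. It is only a minimal sketch
based on the helpers used in the selftests above; "mem" is assumed to be
an already initialised struct intel_memory_region, the SZ_64K size is
arbitrary, and error handling is trimmed down:

	struct drm_i915_gem_object *obj;
	int err;

	/* Ask the region for a physically contiguous 64K object. */
	obj = i915_gem_object_create_region(mem, SZ_64K,
					    I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Backing store is allocated here; contiguous means one sg entry. */
	err = i915_gem_object_pin_pages(obj);
	if (err) {
		i915_gem_object_put(obj);
		return err;
	}

	/* ... use the object ... */

	i915_gem_object_unpin_pages(obj);
	i915_gem_object_put(obj);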