From: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>

In the following patch we need to reserve regions that are inaccessible
to the driver during initialization, so add mem->reserved for collecting
such regions.

v2: turn into an actual intel_memory_region_reserve api

Cc: Imre Deak <imre.deak@xxxxxxxxx>
Signed-off-by: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>
Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
Reviewed-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
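A quick illustration for reviewers (not part of the patch): a minimal sketch
of how a caller might use the new interface while setting up a region. The
helper name and the reserved range below are made up purely to show the
calling convention; the real consumer comes in the follow-up patch referenced
above, and the snippet assumes the usual i915 headers are in scope.

/*
 * Hypothetical example only: carve an inaccessible range out of a region
 * so the buddy allocator never hands it to the driver.  The blocks backing
 * the range are collected on mem->reserved and returned to the allocator
 * from intel_memory_region_release_buddy().
 */
static int example_reserve_unusable_tail(struct intel_memory_region *mem)
{
	/* Assume the last 1M of the region is not usable by the driver. */
	const u64 unusable_size = SZ_1M;
	const u64 unusable_offset =
		resource_size(&mem->region) - unusable_size;

	return intel_memory_region_reserve(mem, unusable_offset,
					   unusable_size);
}

Keeping the reserved blocks on the region itself means release_buddy() can
hand them back with i915_buddy_free_list() without the caller having to
track them.
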
 drivers/gpu/drm/i915/intel_memory_region.c   | 14 ++++
 drivers/gpu/drm/i915/intel_memory_region.h   |  5 ++
 .../drm/i915/selftests/intel_memory_region.c | 77 +++++++++++++++++++
 3 files changed, 96 insertions(+)

diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index b1b610bfff09..49d306b5532f 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -156,9 +156,22 @@ int intel_memory_region_init_buddy(struct intel_memory_region *mem)
 
 void intel_memory_region_release_buddy(struct intel_memory_region *mem)
 {
+	i915_buddy_free_list(&mem->mm, &mem->reserved);
 	i915_buddy_fini(&mem->mm);
 }
 
+int intel_memory_region_reserve(struct intel_memory_region *mem,
+				u64 offset, u64 size)
+{
+	int ret;
+
+	mutex_lock(&mem->mm_lock);
+	ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
+	mutex_unlock(&mem->mm_lock);
+
+	return ret;
+}
+
 struct intel_memory_region *
 intel_memory_region_create(struct drm_i915_private *i915,
 			   resource_size_t start,
@@ -185,6 +198,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
 	mutex_init(&mem->objects.lock);
 	INIT_LIST_HEAD(&mem->objects.list);
 	INIT_LIST_HEAD(&mem->objects.purgeable);
+	INIT_LIST_HEAD(&mem->reserved);
 
 	mutex_init(&mem->mm_lock);
 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 6ffc0673f005..d17e4fe3123c 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -89,6 +89,8 @@ struct intel_memory_region {
 	unsigned int id;
 	char name[8];
 
+	struct list_head reserved;
+
 	dma_addr_t remap_addr;
 
 	struct {
@@ -113,6 +115,9 @@ void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
 					   struct list_head *blocks);
 void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
 
+int intel_memory_region_reserve(struct intel_memory_region *mem,
+				u64 offset, u64 size);
+
 struct intel_memory_region *
 intel_memory_region_create(struct drm_i915_private *i915,
 			   resource_size_t start,
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index ce7adfa3bca0..64348528e1d5 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -144,6 +144,82 @@ static bool is_contiguous(struct drm_i915_gem_object *obj)
 	return true;
 }
 
+static int igt_mock_reserve(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	resource_size_t avail = resource_size(&mem->region);
+	struct drm_i915_gem_object *obj;
+	const u32 chunk_size = SZ_32M;
+	u32 i, offset, count, *order;
+	u64 allocated, cur_avail;
+	I915_RND_STATE(prng);
+	LIST_HEAD(objects);
+	int err = 0;
+
+	if (!list_empty(&mem->reserved)) {
+		pr_err("%s region reserved list is not empty\n", __func__);
+		return -EINVAL;
+	}
+
+	count = avail / chunk_size;
+	order = i915_random_order(count, &prng);
+	if (!order)
+		return 0;
+
+	/* Reserve a bunch of ranges within the region */
+	for (i = 0; i < count; ++i) {
+		u64 start = order[i] * chunk_size;
+		u64 size = i915_prandom_u32_max_state(chunk_size, &prng);
+
+		/* Allow for some really big holes */
+		if (!size)
+			continue;
+
+		size = round_up(size, PAGE_SIZE);
+		offset = igt_random_offset(&prng, 0, chunk_size, size,
+					   PAGE_SIZE);
+
+		err = intel_memory_region_reserve(mem, start + offset, size);
+		if (err) {
+			pr_err("%s failed to reserve range\n", __func__);
+			goto out_close;
+		}
+
+		/* XXX: maybe sanity check the block range here? */
+		avail -= size;
+	}
+
+	/* Try to see if we can allocate from the remaining space */
+	allocated = 0;
+	cur_avail = avail;
+	do {
+		u32 size = i915_prandom_u32_max_state(cur_avail, &prng);
+
+		size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
+		obj = igt_object_create(mem, &objects, size, 0);
+		if (IS_ERR(obj)) {
+			if (PTR_ERR(obj) == -ENXIO)
+				break;
+
+			err = PTR_ERR(obj);
+			goto out_close;
+		}
+		cur_avail -= size;
+		allocated += size;
+	} while (1);
+
+	if (allocated != avail) {
+		pr_err("%s mismatch between allocation and free space\n", __func__);
+		err = -EINVAL;
+	}
+
+out_close:
+	kfree(order);
+	close_objects(mem, &objects);
+	i915_buddy_free_list(&mem->mm, &mem->reserved);
+	return err;
+}
+
 static int igt_mock_contiguous(void *arg)
 {
 	struct intel_memory_region *mem = arg;
@@ -930,6 +1006,7 @@ static int perf_memcpy(void *arg)
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_mock_reserve),
 		SUBTEST(igt_mock_fill),
 		SUBTEST(igt_mock_contiguous),
 		SUBTEST(igt_mock_splintered_region),
-- 
2.26.2