Test the low-level i915_address_space interfaces to sanity check the
live insertion/removal of address ranges.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Matthew Auld <matthew.auld@xxxxxxxxx>
---
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 93 +++++++++++++++++++++++++++
 1 file changed, 93 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 7d695cdcd20a..abde71d857e0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -26,6 +26,7 @@
 #include <linux/prime_numbers.h>
 
 #include "../i915_selftest.h"
+#include "i915_random.h"
 
 #include "mock_drm.h"
 
@@ -485,6 +486,86 @@ static int walk_hole(struct drm_i915_private *i915,
 	return 0;
 }
 
+static int drunk_hole(struct drm_i915_private *i915,
+		      struct i915_address_space *vm,
+		      u64 hole_start, u64 hole_end,
+		      unsigned long end_time)
+{
+	I915_RND_STATE(seed_prng);
+	unsigned int size;
+
+	/* Keep creating larger objects until one cannot fit into the hole */
+	for (size = 12; (hole_end - hole_start) >> size; size++) {
+		I915_RND_SUBSTATE(prng, seed_prng);
+		struct drm_i915_gem_object *obj;
+		unsigned int *order, count, n;
+		u64 hole_size;
+
+		hole_size = (hole_end - hole_start) >> size;
+		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
+			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
+		count = hole_size;
+		do {
+			count >>= 1;
+			order = i915_random_order(count, &prng);
+		} while (!order && count);
+		if (!order)
+			break;
+
+		/* Ignore allocation failures (i.e. don't report them as
+		 * a test failure) as we are purposefully allocating very
+		 * large objects without checking that we have sufficient
+		 * memory. We expect to hit -ENOMEM.
+		 */
+
+		obj = fake_dma_object(i915, BIT_ULL(size));
+		if (IS_ERR(obj)) {
+			kfree(order);
+			break;
+		}
+
+		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
+
+		if (i915_gem_object_pin_pages(obj)) {
+			i915_gem_object_put(obj);
+			kfree(order);
+			break;
+		}
+
+		for (n = 0; n < count; n++) {
+			u64 addr = hole_start + order[n] * BIT_ULL(size);
+
+			if (igt_timeout(end_time,
+					"%s timed out after %d/%d\n",
+					__func__, n, count)) {
+				hole_end = hole_start; /* quit */
+				break;
+			}
+
+			if (vm->allocate_va_range &&
+			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
+				break;
+
+			vm->insert_entries(vm, obj->mm.pages,
+					   addr, I915_CACHE_NONE, 0);
+		}
+		count = n;
+
+		i915_random_reorder(order, count, &prng);
+		for (n = 0; n < count; n++)
+			vm->clear_range(vm,
+					hole_start + order[n] * BIT_ULL(size),
+					BIT_ULL(size));
+
+		i915_gem_object_unpin_pages(obj);
+		i915_gem_object_put(obj);
+
+		kfree(order);
+	}
+
+	return 0;
+}
+
 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 			  int (*func)(struct drm_i915_private *i915,
 				      struct i915_address_space *vm,
@@ -532,6 +613,11 @@ static int igt_ppgtt_walk(void *arg)
 	return exercise_ppgtt(arg, walk_hole);
 }
 
+static int igt_ppgtt_drunk(void *arg)
+{
+	return exercise_ppgtt(arg, drunk_hole);
+}
+
 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
 {
 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
@@ -591,12 +677,19 @@ static int igt_ggtt_walk(void *arg)
 	return exercise_ggtt(arg, walk_hole);
 }
 
+static int igt_ggtt_drunk(void *arg)
+{
+	return exercise_ggtt(arg, drunk_hole);
+}
+
 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_ppgtt_alloc),
+		SUBTEST(igt_ppgtt_drunk),
 		SUBTEST(igt_ppgtt_walk),
 		SUBTEST(igt_ppgtt_fill),
+		SUBTEST(igt_ggtt_drunk),
 		SUBTEST(igt_ggtt_walk),
 		SUBTEST(igt_ggtt_fill),
 	};
-- 
2.11.0
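
For anyone unfamiliar with the i915_random helpers used above: the test
binds every BIT_ULL(size) slot of the hole exactly once in one random
permutation, then unbinds them in a second, independent permutation, so
that insert_entries()/clear_range() are exercised at unpredictable,
non-adjacent offsets. Below is a minimal, self-contained userspace
sketch of that access pattern; it uses a textbook Fisher-Yates shuffle
with rand_r() in place of the kernel's prandom-backed i915_random
helpers, and printf() standing in for the bind/unbind calls. All names
and values in the sketch are illustrative only, not part of the patch.

	/* Standalone sketch of the shuffled bind/unbind walk in drunk_hole().
	 * rand_r() is a userspace stand-in for the i915_random helpers.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	/* Fisher-Yates: uniform random permutation of order[],
	 * playing the role of i915_random_order()/i915_random_reorder().
	 */
	static void shuffle(unsigned int *order, unsigned int count,
			    unsigned int *seed)
	{
		unsigned int n;

		for (n = count - 1; n > 0; n--) {
			unsigned int m = rand_r(seed) % (n + 1);
			unsigned int tmp = order[n];

			order[n] = order[m];
			order[m] = tmp;
		}
	}

	int main(void)
	{
		unsigned int seed = 0x12345678; /* fixed seed: reproducible */
		unsigned int count = 8, n;	/* 8 slots stand in for hole_size */
		unsigned int *order;

		order = malloc(count * sizeof(*order));
		if (!order)
			return 1;

		for (n = 0; n < count; n++)
			order[n] = n;

		shuffle(order, count, &seed);
		for (n = 0; n < count; n++)	/* cf. vm->insert_entries() */
			printf("bind   slot %u\n", order[n]);

		shuffle(order, count, &seed);	/* cf. i915_random_reorder() */
		for (n = 0; n < count; n++)	/* cf. vm->clear_range() */
			printf("unbind slot %u\n", order[n]);

		free(order);
		return 0;
	}

The fixed seed mirrors why the test draws its permutations from
I915_RND_STATE()/I915_RND_SUBSTATE() rather than an ad-hoc source: with
the selftest's random seed recorded, a failing sequence of bind/unbind
addresses can be replayed exactly on the next run.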