Check that if we request bottom-up allocation from drm_mm_insert_node()
we receive the next available hole from the bottom.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/selftests/drm_mm_selftests.h |   1 +
 drivers/gpu/drm/selftests/test-drm_mm.c      | 102 +++++++++++++++++++++++++++
 2 files changed, 103 insertions(+)

diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
index 6a4575fdc1c0..37bbdac52896 100644
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -17,6 +17,7 @@ selftest(align32, igt_align32)
 selftest(align64, igt_align64)
 selftest(evict, igt_evict)
 selftest(evict_range, igt_evict_range)
+selftest(bottomup, igt_bottomup)
 selftest(topdown, igt_topdown)
 selftest(color, igt_color)
 selftest(color_evict, igt_color_evict)
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 97468b8b33a0..081b5a1c565f 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -1653,6 +1653,108 @@ static int igt_topdown(void *ignored)
 	return ret;
 }
 
+static int igt_bottomup(void *ignored)
+{
+	RND_STATE(prng, random_seed);
+	const unsigned int count = 8192;
+	unsigned int size;
+	unsigned long *bitmap;
+	struct drm_mm mm;
+	struct drm_mm_node *nodes, *node, *next;
+	unsigned int *order, n, m, o = 0;
+	int ret, err;
+
+	/* Like igt_topdown, but instead of searching for the last hole,
+	 * we search for the first.
+	 */
+
+	ret = -ENOMEM;
+	nodes = vzalloc(count * sizeof(*nodes));
+	if (!nodes)
+		goto err;
+
+	bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
+			 GFP_TEMPORARY);
+	if (!bitmap)
+		goto err_nodes;
+
+	order = drm_random_order(count, &prng);
+	if (!order)
+		goto err_bitmap;
+
+	ret = -EINVAL;
+	for (size = 1; size <= 64; size <<= 1) {
+		drm_mm_init(&mm, 0, size*count);
+		for (n = 0; n < count; n++) {
+			err = drm_mm_insert_node_generic(&mm, &nodes[n],
+							 size, 0, n,
+							 DRM_MM_INSERT_LOW);
+			if (err || !assert_node(&nodes[n], &mm, size, 0, n)) {
+				pr_err("bottomup insert failed, size %u step %d\n",
+				       size, n);
+				ret = err ?: -EINVAL;
+				goto out;
+			}
+
+			if (!assert_one_hole(&mm, size*(n + 1), size*count))
+				goto out;
+		}
+
+		if (!assert_continuous(&mm, size))
+			goto out;
+
+		drm_random_reorder(order, count, &prng);
+		drm_for_each_prime(n, min(count, max_prime)) {
+			for (m = 0; m < n; m++) {
+				node = &nodes[order[(o + m) % count]];
+				drm_mm_remove_node(node);
+				__set_bit(node_index(node), bitmap);
+			}
+
+			for (m = 0; m < n; m++) {
+				unsigned int first;
+
+				node = &nodes[order[(o + m) % count]];
+				err = drm_mm_insert_node_generic(&mm, node,
+								 size, 0, 0,
+								 DRM_MM_INSERT_LOW);
+				if (err || !assert_node(node, &mm, size, 0, 0)) {
+					pr_err("insert failed, step %d/%d\n",
+					       m, n);
+					ret = err ?: -EINVAL;
+					goto out;
+				}
+
+				first = find_first_bit(bitmap, count);
+				if (node_index(node) != first) {
+					pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
+					       m, n, first, node_index(node));
+					goto out;
+				}
+				__clear_bit(first, bitmap);
+			}
+
+			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
+
+			o += n;
+		}
+
+		drm_mm_for_each_node_safe(node, next, &mm)
+			drm_mm_remove_node(node);
+		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+	}
+
+	ret = 0;
+out:
+	drm_mm_for_each_node_safe(node, next, &mm)
+		drm_mm_remove_node(node);
+	drm_mm_takedown(&mm);
+	kfree(order);
+err_bitmap:
+	kfree(bitmap);
+err_nodes:
+	vfree(nodes);
+err:
+	return ret;
+}
+
 static void separate_adjacent_colors(const struct drm_mm_node *node,
 				     unsigned long color,
 				     u64 *start,
-- 
2.11.0
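
For context, here is a minimal sketch (not part of the patch) of the allocation
behaviour this selftest exercises: passing DRM_MM_INSERT_LOW to
drm_mm_insert_node_generic() should fill holes from the bottom of the managed
range upwards. The helper name example_bottomup and the 4096-byte range are
arbitrary, illustrative choices; error handling is reduced to the minimum.

#include <drm/drm_mm.h>

static int example_bottomup(void)
{
	struct drm_mm mm;
	struct drm_mm_node a = {}, b = {};	/* nodes must start zeroed */
	int err;

	drm_mm_init(&mm, 0, 4096);		/* manage offsets [0, 4096) */

	/* Each DRM_MM_INSERT_LOW request takes the lowest available hole. */
	err = drm_mm_insert_node_generic(&mm, &a, 1024, 0, 0, DRM_MM_INSERT_LOW);
	if (err)
		goto out;
	err = drm_mm_insert_node_generic(&mm, &b, 1024, 0, 0, DRM_MM_INSERT_LOW);
	if (err)
		goto out_a;

	/* Expect bottom-up placement: a at 0, b immediately above it. */
	WARN_ON(a.start != 0 || b.start != 1024);

	drm_mm_remove_node(&b);
out_a:
	drm_mm_remove_node(&a);
out:
	drm_mm_takedown(&mm);
	return err;
}

The selftest above checks the same property at scale: it frees a random subset
of nodes and verifies every reinsertion lands in the lowest free hole.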