The test provides similar functionality to the gem_cs_prefetch IGT test, but gem_cs_prefetch is not up to date and there was a request to move this coverage into the kernel selftests; this patch is the response to that request. There is a companion patch on igt_dev which removes the gem_cs_prefetch test from IGT.
---
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 134 ++++++++++++++++++++++++++
 1 file changed, 134 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 89b6ca9b14a7..42403da610c6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1625,6 +1625,139 @@ static int igt_gtt_insert(void *arg)
 	return err;
 }
 
+#define COUNT 10
+static int igt_ggtt_scratch(void *arg)
+{
+	static struct insert_mode {
+		const char *name;
+		enum drm_mm_insert_mode mode;
+	} modes[] = {
+		{ "bottom-up", DRM_MM_INSERT_LOW },
+		{ "top-down", DRM_MM_INSERT_HIGH },
+		{}
+	};
+	I915_RND_STATE(prng);
+	struct insert_mode *mode;
+	struct drm_i915_private *i915 = arg;
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct drm_i915_gem_object *obj;
+	struct drm_mm_node *node;
+	struct drm_mm_node tmp[COUNT];
+	unsigned int *order;
+	int err = 0;
+	int n, m;
+	u64 hole_start, hole_end = 0;
+	u64 offset;
+	u32 __iomem *vaddr;
+	u32 val;
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	for (mode = modes; mode->name; mode++) {
+		order = i915_random_order(COUNT, &prng);
+		if (!order) {
+			err = -ENOMEM;
+			goto out_unlock;
+		}
+
+		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			kfree(order);
+			goto out_unlock;
+		}
+
+		drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
+			err = i915_gem_object_pin_pages(obj);
+			if (err)
+				goto out_free;
+
+			intel_runtime_pm_get(i915);
+
+			/*
+			 * Insert COUNT nodes of 3 pages each in random order
+			 * and map the object into the middle page only; the
+			 * two neighbouring pages are left pointing at scratch.
+			 */
+			for (m = 0; m < COUNT; m++) {
+				memset(&tmp[order[m]], 0, sizeof(tmp[order[m]]));
+
+				err = drm_mm_insert_node_in_range(&ggtt->base.mm,
+								  &tmp[order[m]],
+								  3 * PAGE_SIZE, 0,
+								  I915_COLOR_UNEVICTABLE,
+								  0, ggtt->mappable_end,
+								  mode->mode);
+				if (err) {
+					while (m--)
+						drm_mm_remove_node(&tmp[order[m]]);
+					goto out_unpin;
+				}
+
+				offset = tmp[order[m]].start;
+				ggtt->base.insert_page(&ggtt->base,
+						       i915_gem_object_get_dma_address(obj, 0),
+						       offset + PAGE_SIZE,
+						       I915_CACHE_NONE, 0);
+			}
+
+			i915_random_reorder(order, COUNT, &prng);
+
+			for (m = 0; m < COUNT; m++) {
+				for (n = 0; n <= 2; n++) {
+					offset = tmp[order[m]].start + n * PAGE_SIZE;
+					vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
+					iowrite32(n, vaddr);
+					io_mapping_unmap_atomic(vaddr);
+				}
+
+				i915_gem_flush_ggtt_writes(i915);
+
+				/*
+				 * Pages 0 and 2 both hit the scratch page,
+				 * which was written last with 2; page 1 hits
+				 * the object page and must still read back 1.
+				 */
+				for (n = 0; n <= 2; n++) {
+					offset = tmp[order[m]].start + n * PAGE_SIZE;
+					vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
+					val = ioread32(vaddr);
+					io_mapping_unmap_atomic(vaddr);
+
+					if (n != 1 && val != 2) {
+						pr_err("insert page failed: found %d, expected %d\n",
+						       val, 2);
+						err = -EINVAL;
+						break;
+					} else if (n == 1 && val != n) {
+						pr_err("insert page failed: found %d, expected %d\n",
+						       val, n);
+						err = -EINVAL;
+						break;
+					}
+				}
+
+				ggtt->base.clear_range(&ggtt->base,
+						       tmp[order[m]].start,
+						       tmp[order[m]].size);
+				ggtt->invalidate(i915);
+
+				/*
+				 * After clear_range() every page points at
+				 * scratch again, so all three must read 2.
+				 */
+				for (n = 0; n <= 2; n++) {
+					offset = tmp[order[m]].start + n * PAGE_SIZE;
+					vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
+					val = ioread32(vaddr);
+					io_mapping_unmap_atomic(vaddr);
+
+					if (val != 2) {
+						pr_err("clear range failed: found %d, expected %d\n",
+						       val, 2);
+						err = -EINVAL;
+						break;
+					}
+				}
+			}
+
+			for (m = 0; m < COUNT; m++)
+				drm_mm_remove_node(&tmp[m]);
+out_unpin:
+			intel_runtime_pm_put(i915);
+			i915_gem_object_unpin_pages(obj);
+		}
+out_free:
+		i915_gem_object_put(obj);
+		kfree(order);
+	}
+out_unlock:
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+}
+
 int i915_gem_gtt_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -1667,6 +1800,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_ggtt_pot),
 		SUBTEST(igt_ggtt_fill),
 		SUBTEST(igt_ggtt_page),
+		SUBTEST(igt_ggtt_scratch),
 	};
 
 	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
-- 
2.14.3