[PATCH i-g-t] tests/pm_rpm: add subtests for the set_tiling and set_caching ioctls

From: Marius Vlad <marius.c.vlad@xxxxxxxxx>

Add two new subtests, pm-tiling and pm-caching, which exercise the
set_tiling and set_caching ioctls on a range of GEM objects while all
screens are disabled (so the device can runtime suspend), and verify
that the tiling mode and cache level read back match what was set.

Signed-off-by: Marius Vlad <marius.c.vlad@xxxxxxxxx>
---
 tests/pm_rpm.c | 120 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 120 insertions(+)
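
Note for reviewers (not part of the patch): the rough standalone sketch
below shows what the gem_set_tiling()/gem_set_caching() helpers used in
the new subtests boil down to at the ioctl level. It assumes an i915
device at /dev/dri/card0 and that LLC caching is available; on other
platforms the set-caching call may simply fail. The subtests themselves
can be run with e.g. ./tests/pm_rpm --run-subtest pm-tiling.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	struct drm_i915_gem_create create = { .size = 1 << 20 };
	struct drm_i915_gem_set_tiling tiling;
	struct drm_i915_gem_caching caching;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	/* create a 1 MiB GEM object */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return 1;

	/* request X tiling with a 1024 byte stride; the kernel fills in
	 * the swizzle mode on success */
	memset(&tiling, 0, sizeof(tiling));
	tiling.handle = create.handle;
	tiling.tiling_mode = I915_TILING_X;
	tiling.stride = 1024;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &tiling) == 0)
		printf("tiling mode %u, swizzle %u\n",
		       tiling.tiling_mode, tiling.swizzle_mode);

	/* request LLC caching and read the level back */
	memset(&caching, 0, sizeof(caching));
	caching.handle = create.handle;
	caching.caching = I915_CACHING_CACHED;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &caching) == 0) {
		memset(&caching, 0, sizeof(caching));
		caching.handle = create.handle;
		if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &caching) == 0)
			printf("caching level %u\n", caching.caching);
	}

	return 0;
}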

diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
index c4fb19c..157cf29 100644
--- a/tests/pm_rpm.c
+++ b/tests/pm_rpm.c
@@ -1729,6 +1729,120 @@ static void planes_subtest(bool universal, bool dpms)
 	}
 }
 
+static void pm_test_tiling(void)
+{
+	uint32_t *handles;
+	uint8_t **gem_bufs;
+
+	int max_gem_objs = 0;
+	uint8_t off_bit = 20;
+	uint32_t gtt_obj_max_size = (16 * 1024 * 1024);
+
+	uint32_t i, j, tiling_modes[3] = {
+		I915_TILING_NONE,
+		I915_TILING_X,
+		I915_TILING_Y,
+	};
+	uint32_t ti, sw;
+
+	/* stride used for the tiled modes */
+	uint32_t stride = 1024;
+
+	/* count the power-of-two object sizes up to gtt_obj_max_size */
+	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, max_gem_objs++)
+		;
+
+	gem_bufs = calloc(max_gem_objs, sizeof(uint8_t *));
+	handles = malloc(sizeof(uint32_t) * max_gem_objs);
+
+	/* mmap each object through the GTT and fill it with a known pattern */
+	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
+		handles[i] = gem_create(drm_fd, j);
+		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i], j, PROT_WRITE);
+		memset(gem_bufs[i], 0x65, j);
+	}
+
+	/* set each tiling mode on all handles while runtime suspended */
+	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
+		disable_all_screens_and_wait(&ms_data);
+
+		for (j = 0; j < max_gem_objs; j++) {
+			gem_set_tiling(drm_fd, handles[j], tiling_modes[i], stride);
+
+			gem_get_tiling(drm_fd, handles[j], &ti, &sw);
+			igt_assert(tiling_modes[i] == ti);
+		}
+
+		enable_one_screen_and_wait(&ms_data);
+	}
+
+	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
+		igt_assert(munmap(gem_bufs[i], j) == 0);
+		gem_close(drm_fd, handles[i]);
+	}
+
+	free(gem_bufs);
+	free(handles);
+}
+
+static void pm_test_caching(void)
+{
+	uint32_t *handles;
+	uint8_t **gem_bufs;
+	int8_t has_caching_display = -1; /* -1: skip I915_CACHING_DISPLAY below */
+
+	uint32_t i, j, got_caching;
+	uint32_t gtt_obj_max_size = (16 * 1024 * 1024);
+	uint32_t cache_levels[3] = {
+		I915_CACHING_NONE,
+		I915_CACHING_CACHED,		/* LLC caching */
+		I915_CACHING_DISPLAY,		/* eDRAM caching */
+	};
+
+	int max_gem_objs = 0;
+	uint8_t off_bit = 20;
+
+	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, max_gem_objs++)
+		;
+
+	gem_bufs = calloc(max_gem_objs, sizeof(uint8_t *));
+	handles = malloc(sizeof(uint32_t) * max_gem_objs);
+
+	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
+		handles[i] = gem_create(drm_fd, j);
+		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i], j, PROT_WRITE);
+		memset(gem_bufs[i], 0x65, j);
+	}
+
+	/* figure out whether display caching is available on this platform */
+	gem_set_caching(drm_fd, handles[0], I915_CACHING_DISPLAY);
+	if (gem_get_caching(drm_fd, handles[0]))
+		has_caching_display++;
+
+	for (i = 0; i < ARRAY_SIZE(cache_levels) + has_caching_display; i++) {
+		disable_all_screens_and_wait(&ms_data);
+
+		for (j = 0; j < max_gem_objs; j++) {
+			gem_set_caching(drm_fd, handles[j], cache_levels[i]);
+
+			igt_debug("Verifying caching for object %u, level %u\n", j, i);
+			got_caching = gem_get_caching(drm_fd, handles[j]);
+
+			igt_assert(got_caching == cache_levels[i]);
+		}
+
+		enable_one_screen_and_wait(&ms_data);
+	}
+
+	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
+		igt_assert(munmap(gem_bufs[i], j) == 0);
+		gem_close(drm_fd, handles[i]);
+	}
+
+	free(handles);
+	free(gem_bufs);
+}
+
 static void fences_subtest(bool dpms)
 {
 	int i;
@@ -1927,6 +2041,12 @@ int main(int argc, char *argv[])
 	igt_subtest("gem-execbuf-stress-extra-wait")
 		gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
 
+	/* set_tiling / set_caching across runtime suspend */
+	igt_subtest("pm-tiling")
+		pm_test_tiling();
+	igt_subtest("pm-caching")
+		pm_test_caching();
+
 	igt_fixture
 		teardown_environment();
 
-- 
2.6.2




