[PATCH] tests/gem_userptr_blits: Expanded userptr test cases

From: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>

A set of test cases to exercise the new userptr feature.

For the eviction and swapping stress tests I have extracted the common
behaviour from gem_evict_everything into the library and made both test
cases use it, to avoid duplicating the code.
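
As a rough illustration, this is how a test plugs into the shared
helpers (a sketch only; igt_eviction_test_ops and the igt_*_evictions
helpers are the ones added to lib/drmtest.c below, while copy_fn and
clear_fn stand in for the test-specific callbacks):

#include <stdint.h>
#include "drmtest.h"

static void copy_fn(int fd, uint32_t dst, uint32_t src,
		    uint32_t *all_bo, int nr_bos, int error)
{
	/* test-specific blit from src to dst, expecting 'error' */
}

static void clear_fn(int fd, uint32_t bo, int size)
{
	/* test-specific CPU clear of 'bo' */
}

static struct igt_eviction_test_ops ops = {
	.create = gem_create,
	.close  = gem_close,
	.copy   = copy_fn,
	.clear  = clear_fn,
};

static void example(int fd, int size, int count)
{
	igt_minor_evictions(fd, &ops, size, count);
	igt_major_evictions(fd, &ops, size, count);
}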

Both unsynchronized and synchronized userptr objects are tested, but
the latter set of tests will be skipped if the kernel is compiled
without CONFIG_MMU_NOTIFIER.
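
For reference, a minimal sketch of the ioctl call the tests make (the
LOCAL_* definitions mirror the ones in the new test file; unsynchronized
mappings need CAP_SYS_ADMIN, while synchronized mappings need
CONFIG_MMU_NOTIFIER and fail with ENODEV otherwise, which the tests
turn into a skip):

#include <stdint.h>
#include <errno.h>
#include "drm.h"
#include "i915_drm.h"
#include "drmtest.h"

#define LOCAL_I915_GEM_USERPTR       0x34
#define LOCAL_IOCTL_I915_GEM_USERPTR \
	DRM_IOWR(DRM_COMMAND_BASE + LOCAL_I915_GEM_USERPTR, struct local_i915_gem_userptr)

struct local_i915_gem_userptr {
	uint64_t user_ptr;
	uint64_t user_size;
	uint32_t flags;
#define I915_USERPTR_UNSYNCHRONIZED (1 << 31)
	uint32_t handle;
};

static int make_userptr(int fd, void *ptr, uint64_t size,
			int unsynchronized, uint32_t *handle)
{
	struct local_i915_gem_userptr arg = {
		.user_ptr  = (uintptr_t)ptr,
		.user_size = size,
		/* UNSYNCHRONIZED needs CAP_SYS_ADMIN */
		.flags     = unsynchronized ? I915_USERPTR_UNSYNCHRONIZED : 0,
	};

	if (drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &arg))
		return errno;	/* e.g. ENODEV without MMU notifiers */

	*handle = arg.handle;
	return 0;
}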

Also, with 32-bit userspace, swapping tests are skipped if the system
has a lot more RAM than the process address space. Forking swapping
tests are not skipped since their child processes can still trigger
swapping by their cumulative effect.
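
The skip decision boils down to the following heuristic (a sketch of
what can_swap() in the new test does; the 3 GiB figure is the usable
address space assumed for a 32-bit process, with a little slack taken
off both sides):

static int can_swap(unsigned long ram_mb)
{
	unsigned long as_mb;

	if (sizeof(void *) < 8)
		as_mb = 3 * 1024;	/* ~3 GiB usable on 32-bit */
	else
		as_mb = 256 * 1024;	/* effectively unlimited */

	/* a single process can push the system into swap only if its
	 * address space is (roughly) at least as large as installed RAM */
	return (as_mb - 128) >= (ram_mb - 256);
}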

Tested with userptr patch v18 on Android (with some swap added).

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
---
 lib/drmtest.c                |  185 ++++++++
 lib/drmtest.h                |   32 ++
 tests/.gitignore             |    2 +-
 tests/Makefile.sources       |    2 +-
 tests/gem_evict_everything.c |  182 ++------
 tests/gem_userptr_blits.c    |  990 ++++++++++++++++++++++++++++++++++++++++++
 tests/gem_vmap_blits.c       |  344 ---------------
 7 files changed, 1235 insertions(+), 502 deletions(-)
 create mode 100644 tests/gem_userptr_blits.c
 delete mode 100644 tests/gem_vmap_blits.c

diff --git a/lib/drmtest.c b/lib/drmtest.c
index 8bc70a3..b5c1971 100644
--- a/lib/drmtest.c
+++ b/lib/drmtest.c
@@ -1685,3 +1685,188 @@ void igt_drop_root(void)
 	igt_assert(getgid() == 2);
 	igt_assert(getuid() == 2);
 }
+
+static void igt_exchange_uint32_t(void *array, unsigned i, unsigned j)
+{
+	uint32_t *i_arr = array;
+	uint32_t i_tmp;
+
+	i_tmp = i_arr[i];
+	i_arr[i] = i_arr[j];
+	i_arr[j] = i_tmp;
+}
+
+int igt_minor_evictions(int fd, struct igt_eviction_test_ops *ops,
+			int surface_size, int nr_surfaces)
+{
+	uint32_t *bo, *sel;
+	int n, m, pass, fail;
+
+	igt_require((uint64_t)nr_surfaces * surface_size / (1024 * 1024)
+			< intel_get_total_ram_mb() * 9 / 10);
+
+	bo = malloc(3*nr_surfaces*sizeof(*bo));
+	igt_assert(bo);
+
+	for (n = 0; n < 2*nr_surfaces; n++)
+		bo[n] = ops->create(fd, surface_size);
+
+	sel = bo + n;
+	for (fail = 0, m = 0; fail < 10; fail++) {
+		for (pass = 0; pass < 100; pass++) {
+			for (n = 0; n < nr_surfaces; n++, m += 7)
+				sel[n] = bo[m%(2*nr_surfaces)];
+			ops->copy(fd, sel[0], sel[1], sel, nr_surfaces, 0);
+		}
+		ops->copy(fd, bo[0], bo[0], bo, 2*nr_surfaces, ENOSPC);
+	}
+
+	for (n = 0; n < 2*nr_surfaces; n++)
+		ops->close(fd, bo[n]);
+	free(bo);
+
+	return 0;
+}
+
+int igt_major_evictions(int fd, struct igt_eviction_test_ops *ops,
+			int surface_size, int nr_surfaces)
+{
+	int n, m, loop;
+	uint32_t *bo;
+
+	igt_require((uint64_t)nr_surfaces * surface_size / (1024 * 1024)
+			< intel_get_total_ram_mb() * 9 / 10);
+
+	bo = malloc(nr_surfaces*sizeof(*bo));
+	igt_assert(bo);
+
+	for (n = 0; n < nr_surfaces; n++)
+		bo[n] = ops->create(fd, surface_size);
+
+	for (loop = 0, m = 0; loop < 100; loop++, m += 17) {
+		n = m % nr_surfaces;
+		ops->copy(fd, bo[n], bo[n], &bo[n], 1, 0);
+	}
+
+	for (n = 0; n < nr_surfaces; n++)
+		ops->close(fd, bo[n]);
+	free(bo);
+
+	return 0;
+}
+
+int igt_swapping_evictions(int fd, struct igt_eviction_test_ops *ops,
+			   int surface_size,
+			   int working_surfaces,
+			   int trash_surfaces)
+{
+	uint32_t *bo;
+	int i, n, pass;
+
+	igt_require((uint64_t)working_surfaces * surface_size / (1024 * 1024)
+			< intel_get_total_ram_mb() * 9 / 10);
+
+	if (trash_surfaces < working_surfaces)
+		trash_surfaces = working_surfaces;
+
+	bo = malloc(trash_surfaces*sizeof(*bo));
+	igt_assert(bo);
+
+	for (n = 0; n < trash_surfaces; n++)
+		bo[n] = ops->create(fd, surface_size);
+
+	for (i = 0; i < trash_surfaces/32; i++) {
+		igt_permute_array(bo, trash_surfaces, igt_exchange_uint32_t);
+
+		for (pass = 0; pass < 100; pass++) {
+			ops->copy(fd, bo[0], bo[1], bo, working_surfaces, 0);
+		}
+	}
+
+	for (n = 0; n < trash_surfaces; n++)
+		ops->close(fd, bo[n]);
+	free(bo);
+
+	return 0;
+}
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+int igt_forking_evictions(int fd, struct igt_eviction_test_ops *ops,
+			  int surface_size, int working_surfaces,
+			  int trash_surfaces, unsigned flags)
+{
+	uint32_t *bo;
+	int n, pass, l;
+	int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
+	int bo_count;
+
+	igt_require((uint64_t)working_surfaces * surface_size / (1024 * 1024)
+			< intel_get_total_ram_mb() * 9 / 10);
+
+	if (flags & FORKING_EVICTIONS_SWAPPING) {
+		igt_require(intel_get_total_ram_mb() / 4
+				< intel_get_total_swap_mb());
+		bo_count = trash_surfaces;
+
+		if (bo_count < working_surfaces)
+			bo_count = working_surfaces;
+	} else
+		bo_count = working_surfaces;
+
+	bo = malloc(bo_count*sizeof(*bo));
+	igt_assert(bo);
+
+	for (n = 0; n < bo_count; n++)
+		bo[n] = ops->create(fd, surface_size);
+
+	igt_fork(i, min(num_threads * 4, 12)) {
+		int realfd = fd;
+		int num_passes = flags & FORKING_EVICTIONS_SWAPPING ? 10 : 100;
+
+		/* Every fork should have a different permutation! */
+		srand(i * 63);
+
+		if (flags & FORKING_EVICTIONS_INTERRUPTIBLE)
+			igt_fork_signal_helper();
+
+		igt_permute_array(bo, bo_count, igt_exchange_uint32_t);
+
+		if (flags & FORKING_EVICTIONS_DUP_DRMFD) {
+			realfd = drm_open_any();
+
+			/* We can overwrite the bo array since we're forked. */
+			for (l = 0; l < bo_count; l++) {
+				uint32_t flink;
+
+				flink = gem_flink(fd, bo[l]);
+				bo[l] = gem_open(realfd, flink);
+			}
+		}
+
+		for (pass = 0; pass < num_passes; pass++) {
+			ops->copy(realfd, bo[0], bo[1], bo, working_surfaces, 0);
+
+			for (l = 0; l < working_surfaces &&
+			  (flags & FORKING_EVICTIONS_MEMORY_PRESSURE);
+			    l++) {
+				ops->clear(realfd, bo[l], surface_size);
+			}
+		}
+
+		if (flags & FORKING_EVICTIONS_INTERRUPTIBLE)
+			igt_stop_signal_helper();
+
+		/* drmfd closing will take care of additional bo refs */
+		if (flags & FORKING_EVICTIONS_DUP_DRMFD)
+			close(realfd);
+	}
+
+	igt_waitchildren();
+
+	for (n = 0; n < bo_count; n++)
+		ops->close(fd, bo[n]);
+	free(bo);
+
+	return 0;
+}
diff --git a/lib/drmtest.h b/lib/drmtest.h
index d42a6f7..022af1d 100644
--- a/lib/drmtest.h
+++ b/lib/drmtest.h
@@ -361,4 +361,36 @@ void igt_system_suspend_autoresume(void);
 /* dropping priviledges */
 void igt_drop_root(void);
 
+struct igt_eviction_test_ops
+{
+	uint32_t (*create)(int fd, int size);
+	void	 (*close)(int fd, uint32_t bo);
+	void	 (*copy)(int fd, uint32_t dst, uint32_t src,
+			 uint32_t *all_bo, int nr_bos, int error);
+	void	 (*clear)(int fd, uint32_t bo, int size);
+};
+
+int igt_minor_evictions(int fd, struct igt_eviction_test_ops *ops,
+			int surface_size, int nr_surfaces);
+
+int igt_major_evictions(int fd, struct igt_eviction_test_ops *ops,
+			int surface_size, int nr_surfaces);
+
+int igt_swapping_evictions(int fd, struct igt_eviction_test_ops *ops,
+			   int surface_size, int working_surfaces,
+			   int trash_surfaces);
+
+#define FORKING_EVICTIONS_INTERRUPTIBLE	  (1 << 0)
+#define FORKING_EVICTIONS_SWAPPING	  (1 << 1)
+#define FORKING_EVICTIONS_DUP_DRMFD	  (1 << 2)
+#define FORKING_EVICTIONS_MEMORY_PRESSURE (1 << 3)
+#define ALL_FORKING_EVICTIONS	(FORKING_EVICTIONS_INTERRUPTIBLE | \
+				 FORKING_EVICTIONS_SWAPPING | \
+				 FORKING_EVICTIONS_DUP_DRMFD | \
+				 FORKING_EVICTIONS_MEMORY_PRESSURE)
+
+int igt_forking_evictions(int fd, struct igt_eviction_test_ops *ops,
+			  int surface_size, int working_surfaces,
+			  int trash_surfaces, unsigned flags);
+
 #endif /* DRMTEST_H */
diff --git a/tests/.gitignore b/tests/.gitignore
index 7377275..798eeed 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -91,7 +91,7 @@ gem_tiled_swapping
 gem_tiling_max_stride
 gem_unfence_active_buffers
 gem_unref_active_buffers
-gem_vmap_blits
+gem_userptr_blits
 gem_wait_render_timeout
 gem_write_read_ring_switch
 gen3_mixed_blits
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index a8c0c96..2d42a18 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -115,7 +115,7 @@ TESTS_progs = \
 	gem_tiling_max_stride \
 	gem_unfence_active_buffers \
 	gem_unref_active_buffers \
-	gem_vmap_blits \
+	gem_userptr_blits \
 	gem_wait_render_timeout \
 	gen3_mixed_blits \
 	gen3_render_linear_blits \
diff --git a/tests/gem_evict_everything.c b/tests/gem_evict_everything.c
index 41abef7..49436ea 100644
--- a/tests/gem_evict_everything.c
+++ b/tests/gem_evict_everything.c
@@ -125,183 +125,51 @@ copy(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo, int error)
 	free(obj);
 }
 
-static void exchange_uint32_t(void *array, unsigned i, unsigned j)
+static void clear(int fd, uint32_t handle, int size)
 {
-	uint32_t *i_arr = array;
-	uint32_t i_tmp;
+	void *base = gem_mmap__cpu(fd, handle, size, PROT_READ | PROT_WRITE);
 
-	i_tmp = i_arr[i];
-	i_arr[i] = i_arr[j];
-	i_arr[j] = i_tmp;
+	igt_assert(base != NULL);
+	memset(base, 0, size);
+	munmap(base, size);
 }
 
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
-#define INTERRUPTIBLE	(1 << 0)
-#define SWAPPING	(1 << 1)
-#define DUP_DRMFD	(1 << 2)
-#define MEMORY_PRESSURE	(1 << 3)
-#define ALL_FLAGS	(INTERRUPTIBLE | SWAPPING | DUP_DRMFD | MEMORY_PRESSURE)
+static struct igt_eviction_test_ops fault_ops = {
+	.create = gem_create,
+	.close = gem_close,
+	.copy = copy,
+	.clear = clear,
+};
 
 static void forked_evictions(int fd, int size, int count,
 			     unsigned flags)
 {
-	uint32_t *bo;
-	int n, pass, l;
-	int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
-	int bo_count;
-
-	igt_require((uint64_t)count * size / (1024 * 1024) < intel_get_total_ram_mb() * 9 / 10);
-
-	if (flags & SWAPPING) {
-		igt_require(intel_get_total_ram_mb() / 4 < intel_get_total_swap_mb());
-		bo_count = intel_get_total_ram_mb() * 11 / 10;
-
-		if (bo_count < count)
-			bo_count = count;
-	} else
-		bo_count = count;
-
-	bo = malloc(bo_count*sizeof(*bo));
-	igt_assert(bo);
-
-	for (n = 0; n < bo_count; n++)
-		bo[n] = gem_create(fd, size);
-
-	igt_fork(i, min(count, min(num_threads * 5, 12))) {
-		int realfd = fd;
-		int num_passes = flags & SWAPPING ? 10 : 100;
-
-		/* Every fork should have a different permutation! */
-		srand(i * 63);
-
-		if (flags & INTERRUPTIBLE)
-			igt_fork_signal_helper();
-
-		igt_permute_array(bo, bo_count, exchange_uint32_t);
-
-		if (flags & DUP_DRMFD) {
-			realfd = drm_open_any();
-
-			/* We can overwrite the bo array since we're forked. */
-			for (l = 0; l < count; l++) {
-				uint32_t flink;
-
-				flink = gem_flink(fd, bo[l]);
-				bo[l] = gem_open(realfd, flink);
-			}
-
-		}
-
-		for (pass = 0; pass < num_passes; pass++) {
-			copy(realfd, bo[0], bo[1], bo, count, 0);
-
-			for (l = 0; l < count && (flags & MEMORY_PRESSURE); l++) {
-				uint32_t *base = gem_mmap__cpu(realfd, bo[l],
-							       size,
-							       PROT_READ | PROT_WRITE);
-				memset(base, 0, size);
-				munmap(base, size);
-			}
-		}
-
-		if (flags & INTERRUPTIBLE)
-			igt_stop_signal_helper();
-
-		/* drmfd closing will take care of additional bo refs */
-		if (flags & DUP_DRMFD)
-			close(realfd);
-	}
+	int trash_count;
 
-	igt_waitchildren();
+	trash_count = intel_get_total_ram_mb() * 11 / 10;
 
-	for (n = 0; n < bo_count; n++)
-		gem_close(fd, bo[n]);
-	free(bo);
+	igt_forking_evictions(fd, &fault_ops, size, count, trash_count, flags);
 }
 
 static void swapping_evictions(int fd, int size, int count)
 {
-	uint32_t *bo;
-	int i, n, pass;
-	int bo_count;
-
-	igt_require((uint64_t)count * size / (1024 * 1024) < intel_get_total_ram_mb() * 9 / 10);
+	int trash_count;
 
 	igt_require(intel_get_total_ram_mb() / 4 < intel_get_total_swap_mb());
-	bo_count = intel_get_total_ram_mb() * 11 / 10;
-
-	if (bo_count < count)
-		bo_count = count;
-
-	bo = malloc(bo_count*sizeof(*bo));
-	igt_assert(bo);
-
-	for (n = 0; n < bo_count; n++)
-		bo[n] = gem_create(fd, size);
-
-	for (i = 0; i < bo_count/32; i++) {
-		igt_permute_array(bo, bo_count, exchange_uint32_t);
 
-		for (pass = 0; pass < 100; pass++) {
-			copy(fd, bo[0], bo[1], bo, count, 0);
-		}
-	}
+	trash_count = intel_get_total_ram_mb() * 11 / 10;
 
-	for (n = 0; n < bo_count; n++)
-		gem_close(fd, bo[n]);
-	free(bo);
+	igt_swapping_evictions(fd, &fault_ops, size, count, trash_count);
 }
 
 static void minor_evictions(int fd, int size, int count)
 {
-	uint32_t *bo, *sel;
-	int n, m, pass, fail;
-
-	igt_require((uint64_t)count * size / (1024 * 1024) < intel_get_total_ram_mb() * 9 / 10);
-
-	bo = malloc(3*count*sizeof(*bo));
-	igt_assert(bo);
-
-	for (n = 0; n < 2*count; n++)
-		bo[n] = gem_create(fd, size);
-
-	sel = bo + n;
-	for (fail = 0, m = 0; fail < 10; fail++) {
-		for (pass = 0; pass < 100; pass++) {
-			for (n = 0; n < count; n++, m += 7)
-				sel[n] = bo[m%(2*count)];
-			copy(fd, sel[0], sel[1], sel, count, 0);
-		}
-		copy(fd, bo[0], bo[0], bo, 2*count, ENOSPC);
-	}
-
-	for (n = 0; n < 2*count; n++)
-		gem_close(fd, bo[n]);
-	free(bo);
+	igt_minor_evictions(fd, &fault_ops, size, count);
 }
 
 static void major_evictions(int fd, int size, int count)
 {
-	int n, m, loop;
-	uint32_t *bo;
-
-	igt_require((uint64_t)count * size / (1024 * 1024) < intel_get_total_ram_mb() * 9 / 10);
-
-	bo = malloc(count*sizeof(*bo));
-	igt_assert(bo);
-
-	for (n = 0; n < count; n++)
-		bo[n] = gem_create(fd, size);
-
-	for (loop = 0, m = 0; loop < 100; loop++, m += 17) {
-		n = m % count;
-		copy(fd, bo[n], bo[n], &bo[n], 1, 0);
-	}
-
-	for (n = 0; n < count; n++)
-		gem_close(fd, bo[n]);
-	free(bo);
+	igt_major_evictions(fd, &fault_ops, size, count);
 }
 
 igt_main
@@ -319,12 +187,14 @@ igt_main
 		count = 3*gem_aperture_size(fd) / size / 4;
 	}
 
-	for (unsigned flags = 0; flags < ALL_FLAGS + 1; flags++) {
+	for (unsigned flags = 0; flags < ALL_FORKING_EVICTIONS + 1; flags++) {
 		igt_subtest_f("forked%s%s%s-%s",
-			      flags & SWAPPING ? "-swapping" : "",
-			      flags & DUP_DRMFD ? "-multifd" : "",
-			      flags & MEMORY_PRESSURE ? "-mempressure" : "",
-			      flags & INTERRUPTIBLE ? "interruptible" : "normal") {
+		    flags & FORKING_EVICTIONS_SWAPPING ? "-swapping" : "",
+		    flags & FORKING_EVICTIONS_DUP_DRMFD ? "-multifd" : "",
+		    flags & FORKING_EVICTIONS_MEMORY_PRESSURE ?
+				"-mempressure" : "",
+		    flags & FORKING_EVICTIONS_INTERRUPTIBLE ?
+				"interruptible" : "normal") {
 			forked_evictions(fd, size, count, flags);
 		}
 	}
diff --git a/tests/gem_userptr_blits.c b/tests/gem_userptr_blits.c
new file mode 100644
index 0000000..c94f686
--- /dev/null
+++ b/tests/gem_userptr_blits.c
@@ -0,0 +1,990 @@
+/*
+ * Copyright © 2009-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@xxxxxxxxxx>
+ *    Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
+ *    Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
+ *
+ */
+
+/** @file gem_userptr_blits.c
+ *
+ * This is a test of doing many blits using a mixture of normal system pages
+ * and uncached linear buffers, with a working set larger than the
+ * aperture size.
+ *
+ * The goal is to simply ensure the basics work.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/mman.h>
+#include "drm.h"
+#include "i915_drm.h"
+#include "drmtest.h"
+#include "intel_bufmgr.h"
+#include "intel_batchbuffer.h"
+#include "intel_gpu_tools.h"
+
+#define WIDTH 512
+#define HEIGHT 512
+#define PAGE_SIZE 4096
+
+#define LOCAL_I915_GEM_USERPTR       0x34
+#define LOCAL_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_USERPTR, struct local_i915_gem_userptr)
+struct local_i915_gem_userptr {
+	uint64_t user_ptr;
+	uint64_t user_size;
+	uint32_t flags;
+#define I915_USERPTR_READ_ONLY (1<<0)
+#define I915_USERPTR_UNSYNCHRONIZED (1<<31)
+	uint32_t handle;
+};
+
+static uint32_t userptr_flags;
+
+static uint32_t linear[WIDTH*HEIGHT];
+
+static void gem_userptr_test_unsynchronized(void)
+{
+	userptr_flags = I915_USERPTR_UNSYNCHRONIZED;
+}
+
+static void gem_userptr_test_synchronized(void)
+{
+	userptr_flags = 0;
+}
+
+static int gem_userptr(int fd, void *ptr, int size, int read_only, uint32_t *handle)
+{
+	struct local_i915_gem_userptr userptr;
+	int ret;
+
+	userptr.user_ptr = (uintptr_t)ptr;
+	userptr.user_size = size;
+	userptr.flags = userptr_flags;
+	if (read_only)
+		userptr.flags |= I915_USERPTR_READ_ONLY;
+
+	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
+	if (ret)
+		ret = errno;
+	igt_skip_on_f(ret == ENODEV &&
+		      (userptr_flags & I915_USERPTR_UNSYNCHRONIZED) == 0,
+		      "Skipping, synchronized mappings with no kernel CONFIG_MMU_NOTIFIER?");
+	if (ret == 0)
+		*handle = userptr.handle;
+
+	return ret;
+}
+
+
+static void gem_userptr_sync(int fd, uint32_t handle)
+{
+	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+}
+
+static void
+copy(int fd, uint32_t dst, uint32_t src, int error)
+{
+	uint32_t batch[10];
+	struct drm_i915_gem_relocation_entry reloc[2];
+	struct drm_i915_gem_exec_object2 obj[3];
+	struct drm_i915_gem_execbuffer2 exec;
+	uint32_t handle;
+	int ret;
+
+	batch[0] = XY_SRC_COPY_BLT_CMD |
+		  XY_SRC_COPY_BLT_WRITE_ALPHA |
+		  XY_SRC_COPY_BLT_WRITE_RGB | 6;
+	batch[1] = (3 << 24) | /* 32 bits */
+		  (0xcc << 16) | /* copy ROP */
+		  WIDTH*4;
+	batch[2] = 0; /* dst x1,y1 */
+	batch[3] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
+	batch[4] = 0; /* dst reloc */
+	batch[5] = 0; /* src x1,y1 */
+	batch[6] = WIDTH*4;
+	batch[7] = 0; /* src reloc */
+	batch[8] = MI_BATCH_BUFFER_END;
+	batch[9] = MI_NOOP;
+
+	handle = gem_create(fd, 4096);
+	gem_write(fd, handle, 0, batch, sizeof(batch));
+
+	reloc[0].target_handle = dst;
+	reloc[0].delta = 0;
+	reloc[0].offset = 4 * sizeof(batch[0]);
+	reloc[0].presumed_offset = 0;
+	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
+	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
+
+	reloc[1].target_handle = src;
+	reloc[1].delta = 0;
+	reloc[1].offset = 7 * sizeof(batch[0]);
+	reloc[1].presumed_offset = 0;
+	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
+	reloc[1].write_domain = 0;
+
+	obj[0].handle = dst;
+	obj[0].relocation_count = 0;
+	obj[0].relocs_ptr = 0;
+	obj[0].alignment = 0;
+	obj[0].offset = 0;
+	obj[0].flags = 0;
+	obj[0].rsvd1 = 0;
+	obj[0].rsvd2 = 0;
+
+	obj[1].handle = src;
+	obj[1].relocation_count = 0;
+	obj[1].relocs_ptr = 0;
+	obj[1].alignment = 0;
+	obj[1].offset = 0;
+	obj[1].flags = 0;
+	obj[1].rsvd1 = 0;
+	obj[1].rsvd2 = 0;
+
+	obj[2].handle = handle;
+	obj[2].relocation_count = 2;
+	obj[2].relocs_ptr = (uintptr_t)reloc;
+	obj[2].alignment = 0;
+	obj[2].offset = 0;
+	obj[2].flags = 0;
+	obj[2].rsvd1 = obj[2].rsvd2 = 0;
+
+	exec.buffers_ptr = (uintptr_t)obj;
+	exec.buffer_count = 3;
+	exec.batch_start_offset = 0;
+	exec.batch_len = sizeof(batch);
+	exec.DR1 = exec.DR4 = 0;
+	exec.num_cliprects = 0;
+	exec.cliprects_ptr = 0;
+	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
+	exec.rsvd1 = exec.rsvd2 = 0;
+
+	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
+	if (ret)
+		ret = errno;
+	igt_assert(ret == error);
+
+	gem_close(fd, handle);
+}
+
+static void
+blit(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo, int error)
+{
+	uint32_t batch[12];
+	struct drm_i915_gem_relocation_entry reloc[2];
+	struct drm_i915_gem_exec_object2 *obj;
+	struct drm_i915_gem_execbuffer2 exec;
+	uint32_t handle;
+	int n, ret, i=0;
+
+	batch[i++] = (XY_SRC_COPY_BLT_CMD |
+		    XY_SRC_COPY_BLT_WRITE_ALPHA |
+		    XY_SRC_COPY_BLT_WRITE_RGB | 6);
+	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+		batch[i - 1] += 2;
+	batch[i++] = (3 << 24) | /* 32 bits */
+		  (0xcc << 16) | /* copy ROP */
+		  WIDTH*4;
+	batch[i++] = 0; /* dst x1,y1 */
+	batch[i++] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
+	batch[i++] = 0; /* dst reloc */
+	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+		batch[i++] = 0; /* FIXME */
+	batch[i++] = 0; /* src x1,y1 */
+	batch[i++] = WIDTH*4;
+	batch[i++] = 0; /* src reloc */
+	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+		batch[i++] = 0; /* FIXME */
+	batch[i++] = MI_BATCH_BUFFER_END;
+	batch[i++] = MI_NOOP;
+
+	handle = gem_create(fd, 4096);
+	gem_write(fd, handle, 0, batch, sizeof(batch));
+
+	reloc[0].target_handle = dst;
+	reloc[0].delta = 0;
+	reloc[0].offset = 4 * sizeof(batch[0]);
+	reloc[0].presumed_offset = 0;
+	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
+	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
+
+	reloc[1].target_handle = src;
+	reloc[1].delta = 0;
+	reloc[1].offset = 7 * sizeof(batch[0]);
+	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+		reloc[1].offset += sizeof(batch[0]);
+	reloc[1].presumed_offset = 0;
+	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
+	reloc[1].write_domain = 0;
+
+	obj = calloc(n_bo + 1, sizeof(*obj));
+	for (n = 0; n < n_bo; n++)
+		obj[n].handle = all_bo[n];
+	obj[n].handle = handle;
+	obj[n].relocation_count = 2;
+	obj[n].relocs_ptr = (uintptr_t)reloc;
+
+	exec.buffers_ptr = (uintptr_t)obj;
+	exec.buffer_count = n_bo + 1;
+	exec.batch_start_offset = 0;
+	exec.batch_len = i * 4;
+	exec.DR1 = exec.DR4 = 0;
+	exec.num_cliprects = 0;
+	exec.cliprects_ptr = 0;
+	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
+	i915_execbuffer2_set_context_id(exec, 0);
+	exec.rsvd2 = 0;
+
+	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
+	if (ret)
+		ret = errno;
+
+	igt_assert(ret == error);
+
+	gem_close(fd, handle);
+	free(obj);
+}
+
+static uint32_t
+create_userptr(int fd, uint32_t val, uint32_t *ptr)
+{
+	uint32_t handle;
+	int i, ret;
+
+	ret = gem_userptr(fd, ptr, sizeof(linear), 0, &handle);
+	igt_assert(ret == 0);
+	igt_assert(handle != 0);
+
+	/* Fill the BO with dwords starting at val */
+	for (i = 0; i < WIDTH*HEIGHT; i++)
+		ptr[i] = val++;
+
+	return handle;
+}
+
+static void **handle_ptr_map;
+static unsigned int num_handle_ptr_map;
+
+static void add_handle_ptr(uint32_t handle, void *ptr)
+{
+	if (handle >= num_handle_ptr_map) {
+		handle_ptr_map = realloc(handle_ptr_map,
+					 (handle + 1000) * sizeof(void*));
+		num_handle_ptr_map = handle + 1000;
+	}
+
+	handle_ptr_map[handle] = ptr;
+}
+
+static void *get_handle_ptr(uint32_t handle)
+{
+	return handle_ptr_map[handle];
+}
+
+static void free_handle_ptr(uint32_t handle)
+{
+	igt_assert(handle < num_handle_ptr_map);
+	igt_assert(handle_ptr_map[handle]);
+
+	free(handle_ptr_map[handle]);
+	handle_ptr_map[handle] = NULL;
+}
+
+static uint32_t create_userptr_bo(int fd, int size)
+{
+	void *ptr;
+	uint32_t handle;
+	int ret;
+
+	ret = posix_memalign(&ptr, PAGE_SIZE, size);
+	igt_assert(ret == 0);
+
+	ret = gem_userptr(fd, (uint32_t *)ptr, size, 0, &handle);
+	igt_assert(ret == 0);
+	add_handle_ptr(handle, ptr);
+
+	return handle;
+}
+
+static void clear(int fd, uint32_t handle, int size)
+{
+	void *ptr = get_handle_ptr(handle);
+
+	igt_assert(ptr != NULL);
+
+	memset(ptr, 0, size);
+}
+
+static void free_userptr_bo(int fd, uint32_t handle)
+{
+	gem_close(fd, handle);
+	free_handle_ptr(handle);
+}
+
+static uint32_t
+create_bo(int fd, uint32_t val)
+{
+	uint32_t handle;
+	int i;
+
+	handle = gem_create(fd, sizeof(linear));
+
+	/* Fill the BO with dwords starting at val */
+	for (i = 0; i < WIDTH*HEIGHT; i++)
+		linear[i] = val++;
+	gem_write(fd, handle, 0, linear, sizeof(linear));
+
+	return handle;
+}
+
+static void
+check_cpu(uint32_t *ptr, uint32_t val)
+{
+	int i;
+
+	for (i = 0; i < WIDTH*HEIGHT; i++) {
+		if (ptr[i] != val) {
+			fprintf(stderr, "Expected 0x%08x, found 0x%08x "
+				"at offset 0x%08x\n",
+				val, ptr[i], i * 4);
+			abort();
+		}
+		val++;
+	}
+}
+
+static void
+check_gpu(int fd, uint32_t handle, uint32_t val)
+{
+	gem_read(fd, handle, 0, linear, sizeof(linear));
+	check_cpu(linear, val);
+}
+
+static int has_userptr(int fd)
+{
+	uint32_t handle = 0;
+	void *ptr;
+	uint32_t oldflags;
+	int ret;
+
+	igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);
+	oldflags = userptr_flags;
+	gem_userptr_test_unsynchronized();
+	ret = gem_userptr(fd, ptr, PAGE_SIZE, 0, &handle);
+	userptr_flags = oldflags;
+	if (ret != 0) {
+		free(ptr);
+		return 0;
+	}
+
+	gem_close(fd, handle);
+	free(ptr);
+
+	return handle != 0;
+}
+
+static int test_input_checking(int fd)
+{
+	struct local_i915_gem_userptr userptr;
+	int ret;
+
+	/* Invalid flags. */
+	userptr.user_ptr = 0;
+	userptr.user_size = 0;
+	userptr.flags = ~0;
+	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
+	igt_assert(ret != 0);
+
+	/* Too big. */
+	userptr.user_ptr = 0;
+	userptr.user_size = ~0;
+	userptr.flags = 0;
+	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
+	igt_assert(ret != 0);
+
+	/* Both wrong. */
+	userptr.user_ptr = 0;
+	userptr.user_size = ~0;
+	userptr.flags = ~0;
+	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
+	igt_assert(ret != 0);
+
+	return 0;
+}
+
+static int test_access_control(int fd)
+{
+	igt_fork(child, 1) {
+		void *ptr;
+		int ret;
+		uint32_t handle;
+
+		igt_drop_root();
+
+		/* CAP_SYS_ADMIN is needed for UNSYNCHRONIZED mappings. */
+		gem_userptr_test_unsynchronized();
+
+		igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);
+
+		ret = gem_userptr(fd, ptr, PAGE_SIZE, 0, &handle);
+		if (ret == 0)
+			gem_close(fd, handle);
+		free(ptr);
+		igt_assert(ret == EPERM);
+	}
+
+	igt_waitchildren();
+
+	return 0;
+}
+
+static int test_usage_restrictions(int fd)
+{
+	void *ptr;
+	int ret;
+	uint32_t handle;
+
+	igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE * 2) == 0);
+
+	/* Address not aligned. */
+	ret = gem_userptr(fd, (char *)ptr + 1, PAGE_SIZE, 0, &handle);
+	igt_assert(ret != 0);
+
+	/* Size not rounded to page size. */
+	ret = gem_userptr(fd, ptr, PAGE_SIZE - 1, 0, &handle);
+	igt_assert(ret != 0);
+
+	/* Both wrong. */
+	ret = gem_userptr(fd, (char *)ptr + 1, PAGE_SIZE - 1, 0, &handle);
+	igt_assert(ret != 0);
+
+	free(ptr);
+
+	return 0;
+}
+
+static int test_create_destroy(int fd)
+{
+	void *ptr;
+	int ret;
+	uint32_t handle;
+
+	igt_assert(posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE) == 0);
+
+	ret = gem_userptr(fd, ptr, PAGE_SIZE, 0, &handle);
+	igt_assert(ret == 0);
+
+	gem_close(fd, handle);
+	free(ptr);
+
+	return 0;
+}
+
+static int test_coherency(int fd, int count)
+{
+	uint32_t *memory;
+	uint32_t *cpu, *cpu_val;
+	uint32_t *gpu, *gpu_val;
+	uint32_t start = 0;
+	int i, ret;
+
+	printf("Using 2x%d 1MiB buffers\n", count);
+
+	ret = posix_memalign((void **)&memory, PAGE_SIZE, count*sizeof(linear));
+	if (ret != 0 || memory == NULL) {
+		fprintf(stderr, "Unable to allocate %lld bytes\n",
+			(long long)count*sizeof(linear));
+		return 1;
+	}
+
+	gpu = malloc(sizeof(uint32_t)*count*4);
+	gpu_val = gpu + count;
+	cpu = gpu_val + count;
+	cpu_val = cpu + count;
+
+	for (i = 0; i < count; i++) {
+		gpu[i] = create_bo(fd, start);
+		gpu_val[i] = start;
+		start += WIDTH*HEIGHT;
+	}
+
+	for (i = 0; i < count; i++) {
+		cpu[i] = create_userptr(fd, start, memory+i*WIDTH*HEIGHT);
+		cpu_val[i] = start;
+		start += WIDTH*HEIGHT;
+	}
+
+	printf("Verifying initialisation...\n");
+	for (i = 0; i < count; i++) {
+		check_gpu(fd, gpu[i], gpu_val[i]);
+		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
+	}
+
+	printf("Cyclic blits cpu->gpu, forward...\n");
+	for (i = 0; i < count * 4; i++) {
+		int src = i % count;
+		int dst = (i + 1) % count;
+
+		copy(fd, gpu[dst], cpu[src], 0);
+		gpu_val[dst] = cpu_val[src];
+	}
+	for (i = 0; i < count; i++)
+		check_gpu(fd, gpu[i], gpu_val[i]);
+
+	printf("Cyclic blits gpu->cpu, backward...\n");
+	for (i = 0; i < count * 4; i++) {
+		int src = (i + 1) % count;
+		int dst = i % count;
+
+		copy(fd, cpu[dst], gpu[src], 0);
+		cpu_val[dst] = gpu_val[src];
+	}
+	for (i = 0; i < count; i++) {
+		gem_userptr_sync(fd, cpu[i]);
+		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
+	}
+
+	printf("Random blits...\n");
+	for (i = 0; i < count * 4; i++) {
+		int src = random() % count;
+		int dst = random() % count;
+
+		if (random() & 1) {
+			copy(fd, gpu[dst], cpu[src], 0);
+			gpu_val[dst] = cpu_val[src];
+		} else {
+			copy(fd, cpu[dst], gpu[src], 0);
+			cpu_val[dst] = gpu_val[src];
+		}
+	}
+	for (i = 0; i < count; i++) {
+		check_gpu(fd, gpu[i], gpu_val[i]);
+		gem_close(fd, gpu[i]);
+
+		gem_userptr_sync(fd, cpu[i]);
+		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
+		gem_close(fd, cpu[i]);
+	}
+
+	free(gpu);
+	free(memory);
+
+	return 0;
+}
+
+static struct igt_eviction_test_ops fault_ops = {
+	.create = create_userptr_bo,
+	.close = free_userptr_bo,
+	.copy = blit,
+	.clear = clear,
+};
+
+static int can_swap(void)
+{
+	unsigned long as, ram;
+
+	/* Cannot swap if not enough address space */
+
+	/* FIXME: Improve check criteria. */
+	if (sizeof(void*) < 8)
+		as = 3 * 1024;
+	else
+		as = 256 * 1024; /* Just a big number */
+
+	ram = intel_get_total_ram_mb();
+
+	if ((as - 128) < (ram - 256))
+		return 0;
+
+	return 1;
+}
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+static void forked_evictions(int fd, int size, int count,
+			     unsigned flags)
+{
+	int trash_count;
+	int num_threads;
+
+	trash_count = intel_get_total_ram_mb() * 11 / 10;
+	/* Use the fact that the test will spawn a number of child
+	 * processes, meaning swapping will be triggered system
+	 * wide even if one process on its own can't do it.
+	 */
+	num_threads = min(sysconf(_SC_NPROCESSORS_ONLN) * 4, 12);
+	trash_count /= num_threads;
+	if (count > trash_count)
+		count = trash_count;
+
+	igt_forking_evictions(fd, &fault_ops, size, count, trash_count, flags);
+}
+
+static void swapping_evictions(int fd, int size, int count)
+{
+	int trash_count;
+
+	igt_skip_on_f(!can_swap(),
+		"Not enough process address space for swapping tests.\n");
+
+	trash_count = intel_get_total_ram_mb() * 11 / 10;
+
+	igt_swapping_evictions(fd, &fault_ops, size, count, trash_count);
+}
+
+static void minor_evictions(int fd, int size, int count)
+{
+	igt_minor_evictions(fd, &fault_ops, size, count);
+}
+
+static void major_evictions(int fd, int size, int count)
+{
+	igt_major_evictions(fd, &fault_ops, size, count);
+}
+
+static int test_overlap(int fd, int expected)
+{
+	char *ptr;
+	int ret;
+	uint32_t handle, handle2;
+
+	igt_assert(posix_memalign((void *)&ptr, PAGE_SIZE, PAGE_SIZE * 3) == 0);
+
+	ret = gem_userptr(fd, ptr + PAGE_SIZE, PAGE_SIZE, 0, &handle);
+	igt_assert(ret == 0);
+
+	ret = gem_userptr(fd, ptr, PAGE_SIZE, 0, &handle2);
+	igt_assert(ret == 0);
+	gem_close(fd, handle2);
+
+	ret = gem_userptr(fd, ptr + PAGE_SIZE * 2, PAGE_SIZE, 0, &handle2);
+	igt_assert(ret == 0);
+	gem_close(fd, handle2);
+
+	ret = gem_userptr(fd, ptr, PAGE_SIZE * 2, 0, &handle2);
+	igt_assert(ret == expected);
+	if (ret == 0)
+		gem_close(fd, handle2);
+
+	ret = gem_userptr(fd, ptr + PAGE_SIZE, PAGE_SIZE * 2, 0, &handle2);
+	igt_assert(ret == expected);
+	if (ret == 0)
+		gem_close(fd, handle2);
+
+	ret = gem_userptr(fd, ptr, PAGE_SIZE * 3, 0, &handle2);
+	igt_assert(ret == expected);
+	if (ret == 0)
+		gem_close(fd, handle2);
+
+	gem_close(fd, handle);
+	free(ptr);
+
+	return 0;
+}
+
+static int test_unmap(int fd, int expected)
+{
+	char *ptr, *bo_ptr;
+	const unsigned int num_obj = 3;
+	unsigned int i;
+	uint32_t bo[num_obj + 1];
+	size_t map_size = sizeof(linear) * num_obj + (PAGE_SIZE - 1);
+	int ret;
+
+	ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
+				MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	igt_assert(ptr != MAP_FAILED);
+
+	bo_ptr = (char *)(((unsigned long)ptr + (PAGE_SIZE - 1))
+						& ~(PAGE_SIZE - 1));
+
+	for (i = 0; i < num_obj; i++, bo_ptr += sizeof(linear)) {
+		ret = gem_userptr(fd, bo_ptr, sizeof(linear), 0, &bo[i]);
+		igt_assert(ret == 0);
+	}
+
+	bo[num_obj] = create_bo(fd, 0);
+
+	for (i = 0; i < num_obj; i++)
+		copy(fd, bo[num_obj], bo[i], 0);
+
+	ret = munmap(ptr, map_size);
+	igt_assert(ret == 0);
+
+	for (i = 0; i < num_obj; i++)
+		copy(fd, bo[num_obj], bo[i], expected);
+
+	for (i = 0; i < (num_obj + 1); i++)
+		gem_close(fd, bo[i]);
+
+	return 0;
+}
+
+static int test_unmap_after_close(int fd)
+{
+	char *ptr, *bo_ptr;
+	const unsigned int num_obj = 3;
+	unsigned int i;
+	uint32_t bo[num_obj + 1];
+	size_t map_size = sizeof(linear) * num_obj + (PAGE_SIZE - 1);
+	int ret;
+
+	ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
+				MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	igt_assert(ptr != MAP_FAILED);
+
+	bo_ptr = (char *)(((unsigned long)ptr + (PAGE_SIZE - 1))
+						& ~(PAGE_SIZE - 1));
+
+	for (i = 0; i < num_obj; i++, bo_ptr += sizeof(linear)) {
+		ret = gem_userptr(fd, bo_ptr, sizeof(linear), 0, &bo[i]);
+		igt_assert(ret == 0);
+	}
+
+	bo[num_obj] = create_bo(fd, 0);
+
+	for (i = 0; i < num_obj; i++)
+		copy(fd, bo[num_obj], bo[i], 0);
+
+	for (i = 0; i < (num_obj + 1); i++)
+		gem_close(fd, bo[i]);
+
+	ret = munmap(ptr, map_size);
+	igt_assert(ret == 0);
+
+	return 0;
+}
+
+static int test_unmap_cycles(int fd, int expected)
+{
+	int i;
+
+	for (i = 0; i < 1000; i++)
+		test_unmap(fd, expected);
+
+	return 0;
+}
+
+int main(int argc, char **argv)
+{
+	uint64_t aperture_size;
+	unsigned int total_ram;
+	int fd = -1, count = 0, size = 0, ret;
+
+	igt_skip_on_simulation();
+
+	igt_subtest_init(argc, argv);
+
+	igt_fixture {
+		fd = drm_open_any();
+		igt_assert(fd >= 0);
+
+		ret = has_userptr(fd);
+		igt_skip_on_f(ret == 0, "No userptr support - %s (%d)\n",
+			      strerror(errno), ret);
+
+		size = sizeof(linear);
+
+		aperture_size = gem_aperture_size(fd);
+		printf("Aperture size is %lu MiB\n", (long)(aperture_size / (1024*1024)));
+
+		if (argc > 1)
+			count = atoi(argv[1]);
+		if (count == 0)
+			count = 2 * aperture_size / (1024*1024) / 3;
+
+		total_ram = intel_get_total_ram_mb();
+		printf("Total RAM is %u MiB\n", total_ram);
+
+		if (count > total_ram * 3 / 4) {
+			count = intel_get_total_ram_mb() * 3 / 4;
+			printf("Not enough RAM to run test, reducing buffer count.\n");
+		}
+	}
+
+	igt_subtest("input-checking")
+		test_input_checking(fd);
+
+	igt_subtest("usage-restrictions")
+		test_usage_restrictions(fd);
+
+	printf("Testing unsynchronized mappings...\n");
+	gem_userptr_test_unsynchronized();
+
+	igt_subtest("create-destroy-unsync")
+		test_create_destroy(fd);
+
+	igt_subtest("unsync-overlap")
+		test_overlap(fd, 0);
+
+	igt_subtest("unsync-unmap")
+		test_unmap(fd, 0);
+
+	igt_subtest("unsync-unmap-cycles")
+		test_unmap_cycles(fd, 0);
+
+	igt_subtest("unsync-unmap-after-close")
+		test_unmap_after_close(fd);
+
+	igt_subtest("coherency-unsync")
+		test_coherency(fd, count);
+
+	for (unsigned flags = 0; flags < ALL_FORKING_EVICTIONS + 1; flags++) {
+		igt_subtest_f("forked-unsync%s%s%s-%s",
+		    flags & FORKING_EVICTIONS_SWAPPING ? "-swapping" : "",
+		    flags & FORKING_EVICTIONS_DUP_DRMFD ? "-multifd" : "",
+		    flags & FORKING_EVICTIONS_MEMORY_PRESSURE ?
+				"-mempressure" : "",
+		    flags & FORKING_EVICTIONS_INTERRUPTIBLE ?
+				"interruptible" : "normal") {
+			forked_evictions(fd, size, count, flags);
+		}
+	}
+
+	igt_subtest("swapping-unsync-normal")
+		swapping_evictions(fd, size, count);
+
+	igt_subtest("minor-unsync-normal")
+		minor_evictions(fd, size, count);
+
+	igt_subtest("major-unsync-normal") {
+		size = 200 * 1024 * 1024;
+		count = (gem_aperture_size(fd) / size) + 2;
+		major_evictions(fd, size, count);
+	}
+
+	igt_fixture {
+		size = sizeof(linear);
+		count = 2 * gem_aperture_size(fd) / (1024*1024) / 3;
+		if (count > total_ram * 3 / 4)
+			count = intel_get_total_ram_mb() * 3 / 4;
+	}
+
+	igt_fork_signal_helper();
+
+	igt_subtest("swapping-unsync-interruptible")
+		swapping_evictions(fd, size, count);
+
+	igt_subtest("minor-unsync-interruptible")
+		minor_evictions(fd, size, count);
+
+	igt_subtest("major-unsync-interruptible") {
+		size = 200 * 1024 * 1024;
+		count = (gem_aperture_size(fd) / size) + 2;
+		major_evictions(fd, size, count);
+	}
+
+	igt_stop_signal_helper();
+
+	printf("Testing synchronized mappings...\n");
+
+	igt_fixture {
+		size = sizeof(linear);
+		count = 2 * gem_aperture_size(fd) / (1024*1024) / 3;
+		if (count > total_ram * 3 / 4)
+			count = intel_get_total_ram_mb() * 3 / 4;
+	}
+
+	gem_userptr_test_synchronized();
+
+	igt_subtest("create-destroy-sync")
+		test_create_destroy(fd);
+
+	igt_subtest("sync-overlap")
+		test_overlap(fd, EINVAL);
+
+	igt_subtest("sync-unmap")
+		test_unmap(fd, EFAULT);
+
+	igt_subtest("sync-unmap-cycles")
+		test_unmap_cycles(fd, EFAULT);
+
+	igt_subtest("sync-unmap-after-close")
+		test_unmap_after_close(fd);
+
+	igt_subtest("coherency-sync")
+		test_coherency(fd, count);
+
+	for (unsigned flags = 0; flags < ALL_FORKING_EVICTIONS + 1; flags++) {
+		igt_subtest_f("forked-sync%s%s%s-%s",
+		    flags & FORKING_EVICTIONS_SWAPPING ? "-swapping" : "",
+		    flags & FORKING_EVICTIONS_DUP_DRMFD ? "-multifd" : "",
+		    flags & FORKING_EVICTIONS_MEMORY_PRESSURE ?
+				"-mempressure" : "",
+		    flags & FORKING_EVICTIONS_INTERRUPTIBLE ?
+				"interruptible" : "normal") {
+			forked_evictions(fd, size, count, flags);
+		}
+	}
+
+	igt_subtest("swapping-normal-sync")
+		swapping_evictions(fd, size, count);
+
+	igt_subtest("minor-normal-sync")
+		minor_evictions(fd, size, count);
+
+	igt_subtest("major-normal-sync") {
+		size = 200 * 1024 * 1024;
+		count = (gem_aperture_size(fd) / size) + 2;
+		major_evictions(fd, size, count);
+	}
+
+	igt_fixture {
+		size = 1024 * 1024;
+		count = 2 * gem_aperture_size(fd) / (1024*1024) / 3;
+		if (count > total_ram * 3 / 4)
+			count = intel_get_total_ram_mb() * 3 / 4;
+	}
+
+	igt_fork_signal_helper();
+
+	igt_subtest("swapping-sync-interruptible")
+		swapping_evictions(fd, size, count);
+
+	igt_subtest("minor-sync-interruptible")
+		minor_evictions(fd, size, count);
+
+	igt_subtest("major-sync-interruptible") {
+		size = 200 * 1024 * 1024;
+		count = (gem_aperture_size(fd) / size) + 2;
+		major_evictions(fd, size, count);
+	}
+
+	igt_stop_signal_helper();
+
+	igt_subtest("access-control")
+	test_access_control(fd);
+
+	igt_exit();
+
+	return 0;
+}
diff --git a/tests/gem_vmap_blits.c b/tests/gem_vmap_blits.c
deleted file mode 100644
index 48297af..0000000
--- a/tests/gem_vmap_blits.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright © 2009,2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Eric Anholt <eric@xxxxxxxxxx>
- *    Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
- *
- */
-
-/** @file gem_vmap_blits.c
- *
- * This is a test of doing many blits using a mixture of normal system pages
- * and uncached linear buffers, with a working set larger than the
- * aperture size.
- *
- * The goal is to simply ensure the basics work.
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <fcntl.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include "drm.h"
-#include "i915_drm.h"
-#include "drmtest.h"
-#include "intel_bufmgr.h"
-#include "intel_batchbuffer.h"
-#include "intel_gpu_tools.h"
-
-#if !defined(I915_PARAM_HAS_VMAP)
-#pragma message("No vmap support in drm, skipping")
-int main(int argc, char **argv)
-{
-	fprintf(stderr, "No vmap support in drm.\n");
-	return 77;
-}
-#else
-
-#define WIDTH 512
-#define HEIGHT 512
-
-static uint32_t linear[WIDTH*HEIGHT];
-
-static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
-{
-	struct drm_i915_gem_vmap vmap;
-
-	vmap.user_ptr = (uintptr_t)ptr;
-	vmap.user_size = size;
-	vmap.flags = 0;
-	if (read_only)
-		vmap.flags |= I915_VMAP_READ_ONLY;
-
-	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VMAP, &vmap))
-		return 0;
-
-	return vmap.handle;
-}
-
-
-static void gem_vmap_sync(int fd, uint32_t handle)
-{
-	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
-}
-
-static void
-copy(int fd, uint32_t dst, uint32_t src)
-{
-	uint32_t batch[10];
-	struct drm_i915_gem_relocation_entry reloc[2];
-	struct drm_i915_gem_exec_object2 obj[3];
-	struct drm_i915_gem_execbuffer2 exec;
-	uint32_t handle;
-	int ret;
-
-	batch[0] = XY_SRC_COPY_BLT_CMD |
-		  XY_SRC_COPY_BLT_WRITE_ALPHA |
-		  XY_SRC_COPY_BLT_WRITE_RGB | 6;
-	batch[1] = (3 << 24) | /* 32 bits */
-		  (0xcc << 16) | /* copy ROP */
-		  WIDTH*4;
-	batch[2] = 0; /* dst x1,y1 */
-	batch[3] = (HEIGHT << 16) | WIDTH; /* dst x2,y2 */
-	batch[4] = 0; /* dst reloc */
-	batch[5] = 0; /* src x1,y1 */
-	batch[6] = WIDTH*4;
-	batch[7] = 0; /* src reloc */
-	batch[8] = MI_BATCH_BUFFER_END;
-	batch[9] = MI_NOOP;
-
-	handle = gem_create(fd, 4096);
-	gem_write(fd, handle, 0, batch, sizeof(batch));
-
-	reloc[0].target_handle = dst;
-	reloc[0].delta = 0;
-	reloc[0].offset = 4 * sizeof(batch[0]);
-	reloc[0].presumed_offset = 0;
-	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;;
-	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
-
-	reloc[1].target_handle = src;
-	reloc[1].delta = 0;
-	reloc[1].offset = 7 * sizeof(batch[0]);
-	reloc[1].presumed_offset = 0;
-	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;;
-	reloc[1].write_domain = 0;
-
-	obj[0].handle = dst;
-	obj[0].relocation_count = 0;
-	obj[0].relocs_ptr = 0;
-	obj[0].alignment = 0;
-	obj[0].offset = 0;
-	obj[0].flags = 0;
-	obj[0].rsvd1 = 0;
-	obj[0].rsvd2 = 0;
-
-	obj[1].handle = src;
-	obj[1].relocation_count = 0;
-	obj[1].relocs_ptr = 0;
-	obj[1].alignment = 0;
-	obj[1].offset = 0;
-	obj[1].flags = 0;
-	obj[1].rsvd1 = 0;
-	obj[1].rsvd2 = 0;
-
-	obj[2].handle = handle;
-	obj[2].relocation_count = 2;
-	obj[2].relocs_ptr = (uintptr_t)reloc;
-	obj[2].alignment = 0;
-	obj[2].offset = 0;
-	obj[2].flags = 0;
-	obj[2].rsvd1 = obj[2].rsvd2 = 0;
-
-	exec.buffers_ptr = (uintptr_t)obj;
-	exec.buffer_count = 3;
-	exec.batch_start_offset = 0;
-	exec.batch_len = sizeof(batch);
-	exec.DR1 = exec.DR4 = 0;
-	exec.num_cliprects = 0;
-	exec.cliprects_ptr = 0;
-	exec.flags = HAS_BLT_RING(intel_get_drm_devid(fd)) ? I915_EXEC_BLT : 0;
-	exec.rsvd1 = exec.rsvd2 = 0;
-
-	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
-	while (ret && errno == EBUSY) {
-		drmCommandNone(fd, DRM_I915_GEM_THROTTLE);
-		ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &exec);
-	}
-	igt_assert(ret == 0);
-
-	gem_close(fd, handle);
-}
-
-static uint32_t
-create_vmap(int fd, uint32_t val, uint32_t *ptr)
-{
-	uint32_t handle;
-	int i;
-
-	handle = gem_vmap(fd, ptr, sizeof(linear), 0);
-
-	/* Fill the BO with dwords starting at val */
-	for (i = 0; i < WIDTH*HEIGHT; i++)
-		ptr[i] = val++;
-
-	return handle;
-}
-
-static uint32_t
-create_bo(int fd, uint32_t val)
-{
-	uint32_t handle;
-	int i;
-
-	handle = gem_create(fd, sizeof(linear));
-
-	/* Fill the BO with dwords starting at val */
-	for (i = 0; i < WIDTH*HEIGHT; i++)
-		linear[i] = val++;
-	gem_write(fd, handle, 0, linear, sizeof(linear));
-
-	return handle;
-}
-
-static void
-check_cpu(uint32_t *ptr, uint32_t val)
-{
-	int i;
-
-	for (i = 0; i < WIDTH*HEIGHT; i++) {
-		if (ptr[i] != val) {
-			fprintf(stderr, "Expected 0x%08x, found 0x%08x "
-				"at offset 0x%08x\n",
-				val, ptr[i], i * 4);
-			abort();
-		}
-		val++;
-	}
-}
-
-static void
-check_gpu(int fd, uint32_t handle, uint32_t val)
-{
-	gem_read(fd, handle, 0, linear, sizeof(linear));
-	check_cpu(linear, val);
-}
-
-static int has_vmap(int fd)
-{
-	drm_i915_getparam_t gp;
-	int i;
-
-	gp.param = I915_PARAM_HAS_VMAP;
-	gp.value = &i;
-
-	return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && i > 0;
-}
-
-int main(int argc, char **argv)
-{
-	uint32_t *memory;
-	uint32_t *cpu, *cpu_val;
-	uint32_t *gpu, *gpu_val;
-	uint32_t start = 0;
-	int i, fd, count;
-
-	igt_simple_init();
-
-	igt_skip_on_simulation();
-
-	fd = drm_open_any();
-
-	if (!has_vmap(fd)) {
-		fprintf(stderr, "No vmap support, ignoring.\n");
-		return 77;
-	}
-
-	count = 0;
-	if (argc > 1)
-		count = atoi(argv[1]);
-	if (count == 0)
-		count = 3 * gem_aperture_size(fd) / (1024*1024) / 4;
-	printf("Using 2x%d 1MiB buffers\n", count);
-
-	memory = malloc(count*sizeof(linear));
-	if (memory == NULL) {
-		fprintf(stderr, "Unable to allocate %lld bytes\n",
-			(long long)count*sizeof(linear));
-		return 1;
-	}
-
-	gpu = malloc(sizeof(uint32_t)*count*4);
-	gpu_val = gpu + count;
-	cpu = gpu_val + count;
-	cpu_val = cpu + count;
-
-	for (i = 0; i < count; i++) {
-		gpu[i] = create_bo(fd, start);
-		gpu_val[i] = start;
-		start += WIDTH*HEIGHT;
-	}
-
-	for (i = 0; i < count; i++) {
-		cpu[i] = create_vmap(fd, start, memory+i*WIDTH*HEIGHT);
-		cpu_val[i] = start;
-		start += WIDTH*HEIGHT;;
-	}
-
-	printf("Verifying initialisation...\n");
-	for (i = 0; i < count; i++) {
-		check_gpu(fd, gpu[i], gpu_val[i]);
-		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
-	}
-
-	printf("Cyclic blits cpu->gpu, forward...\n");
-	for (i = 0; i < count * 4; i++) {
-		int src = i % count;
-		int dst = (i + 1) % count;
-
-		copy(fd, gpu[dst], cpu[src]);
-		gpu_val[dst] = cpu_val[src];
-	}
-	for (i = 0; i < count; i++)
-		check_gpu(fd, gpu[i], gpu_val[i]);
-
-	printf("Cyclic blits gpu->cpu, backward...\n");
-	for (i = 0; i < count * 4; i++) {
-		int src = (i + 1) % count;
-		int dst = i % count;
-
-		copy(fd, cpu[dst], gpu[src]);
-		cpu_val[dst] = gpu_val[src];
-	}
-	for (i = 0; i < count; i++) {
-		gem_vmap_sync(fd, cpu[i]);
-		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
-	}
-
-	printf("Random blits...\n");
-	for (i = 0; i < count * 4; i++) {
-		int src = random() % count;
-		int dst = random() % count;
-
-		if (random() & 1) {
-			copy(fd, gpu[dst], cpu[src]);
-			gpu_val[dst] = cpu_val[src];
-		} else {
-			copy(fd, cpu[dst], gpu[src]);
-			cpu_val[dst] = gpu_val[src];
-		}
-	}
-	for (i = 0; i < count; i++) {
-		check_gpu(fd, gpu[i], gpu_val[i]);
-		gem_vmap_sync(fd, cpu[i]);
-		check_cpu(memory+i*WIDTH*HEIGHT, cpu_val[i]);
-	}
-
-	return 0;
-}
-
-#endif
-- 
1.7.9.7





