On 03/02/20 13:45, Chris Wilson wrote:
The gtt/readonly tests are nothing to do with execution and engines;
they are strictly checking the copy-from-user of the ioctl arguments.
Drop the silly per-engine tests.
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Antonio Argenziano <antonio.argenziano@xxxxxxxxx>
---
tests/i915/gem_exec_basic.c | 109 +++++------------------------------
tests/i915/gem_exec_params.c | 61 ++++++++++++++++++++
2 files changed, 76 insertions(+), 94 deletions(-)
diff --git a/tests/i915/gem_exec_basic.c b/tests/i915/gem_exec_basic.c
index 70dce34b5..0d05819ce 100644
--- a/tests/i915/gem_exec_basic.c
+++ b/tests/i915/gem_exec_basic.c
@@ -36,84 +36,6 @@ static uint32_t batch_create(int fd)
return handle;
}
-static void batch_fini(int fd, uint32_t handle)
-{
- gem_sync(fd, handle); /* catch any GPU hang */
- gem_close(fd, handle);
-}
-
-static void noop(int fd, uint64_t flags)
-{
- struct drm_i915_gem_execbuffer2 execbuf;
- struct drm_i915_gem_exec_object2 exec;
-
- gem_require_ring(fd, flags);
-
- memset(&exec, 0, sizeof(exec));
-
- exec.handle = batch_create(fd);
-
- memset(&execbuf, 0, sizeof(execbuf));
- execbuf.buffers_ptr = to_user_pointer(&exec);
- execbuf.buffer_count = 1;
- execbuf.flags = flags;
- gem_execbuf(fd, &execbuf);
-
- batch_fini(fd, exec.handle);
-}
-
-static void readonly(int fd, uint64_t flags)
-{
- struct drm_i915_gem_execbuffer2 *execbuf;
- struct drm_i915_gem_exec_object2 exec;
-
- gem_require_ring(fd, flags);
-
- memset(&exec, 0, sizeof(exec));
- exec.handle = batch_create(fd);
-
- execbuf = mmap(NULL, 4096, PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
- igt_assert(execbuf != NULL);
-
- execbuf->buffers_ptr = to_user_pointer(&exec);
- execbuf->buffer_count = 1;
- execbuf->flags = flags;
- igt_assert(mprotect(execbuf, 4096, PROT_READ) == 0);
-
- gem_execbuf(fd, execbuf);
-
- munmap(execbuf, 4096);
-
- batch_fini(fd, exec.handle);
-}
-
-static void gtt(int fd, uint64_t flags)
-{
- struct drm_i915_gem_execbuffer2 *execbuf;
- struct drm_i915_gem_exec_object2 *exec;
- uint32_t handle;
-
- gem_require_ring(fd, flags);
-
- handle = gem_create(fd, 4096);
-
- gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
- execbuf = gem_mmap__gtt(fd, handle, 4096, PROT_WRITE);
- exec = (struct drm_i915_gem_exec_object2 *)(execbuf + 1);
- gem_close(fd, handle);
-
- exec->handle = batch_create(fd);
-
- execbuf->buffers_ptr = to_user_pointer(exec);
- execbuf->buffer_count = 1;
- execbuf->flags = flags;
-
- gem_execbuf(fd, execbuf);
-
- batch_fini(fd, exec->handle);
- munmap(execbuf, 4096);
-}
-
igt_main
{
const struct intel_execution_engine2 *e;
@@ -121,30 +43,29 @@ igt_main
igt_fixture {
fd = drm_open_driver(DRIVER_INTEL);
- igt_require_gem(fd);
-
+ /* igt_require_gem(fd); // test is mandatory */
What if that fd is bad? Should we assert on it here?
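Something like this (just a sketch, untested) would catch a failed open explicitly instead of handing a bad fd to the hang detector:

	igt_fixture {
		fd = drm_open_driver(DRIVER_INTEL);
		igt_assert(fd >= 0); /* fail loudly if the open itself went wrong */
		igt_fork_hang_detector(fd);
	}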
igt_fork_hang_detector(fd);
}
igt_subtest_with_dynamic("basic") {
- __for_each_physical_engine(fd, e) {
- igt_dynamic_f("%s", e->name)
- noop(fd, e->flags);
- }
- }
+ struct drm_i915_gem_exec_object2 exec = {
+ .handle = batch_create(fd),
+ };
- igt_subtest_with_dynamic("readonly") {
__for_each_physical_engine(fd, e) {
- igt_dynamic_f("%s", e->name)
- readonly(fd, e->flags);
+ igt_dynamic_f("%s", e->name) {
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&exec),
+ .buffer_count = 1,
+ .flags = e->flags,
+ };
+
+ gem_execbuf(fd, &execbuf);
+ }
}
- }
- igt_subtest_with_dynamic("gtt") {
- __for_each_physical_engine(fd, e) {
- igt_dynamic_f("%s", e->name)
- gtt(fd, e->flags);
- }
+ gem_sync(fd, exec.handle); /* catch any GPU hang */
+ gem_close(fd, exec.handle);
}
igt_fixture {
diff --git a/tests/i915/gem_exec_params.c b/tests/i915/gem_exec_params.c
index 9c3525698..094fa904c 100644
--- a/tests/i915/gem_exec_params.c
+++ b/tests/i915/gem_exec_params.c
@@ -206,6 +206,61 @@ static int has_secure_batches(const int fd)
return v > 0;
}
+static uint32_t batch_create(int fd)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ uint32_t handle;
+
+ handle = gem_create(fd, 4096);
+ gem_write(fd, handle, 0, &bbe, sizeof(bbe));
+
+ return handle;
+}
+
+static void readonly(int i915)
+{
+ struct drm_i915_gem_execbuffer2 *execbuf;
+ struct drm_i915_gem_exec_object2 exec = {
+ batch_create(i915)
+ };
+
+ execbuf = mmap(NULL, 4096, PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
+ igt_assert(execbuf != MAP_FAILED);
+
+ execbuf->buffers_ptr = to_user_pointer(&exec);
+ execbuf->buffer_count = 1;
+ igt_assert(mprotect(execbuf, 4096, PROT_READ) == 0);
+
+ gem_execbuf(i915, execbuf);
+ gem_close(i915, exec.handle);
+
+ munmap(execbuf, 4096);
+}
+
+static void mmapped(int i915)
+{
+ struct drm_i915_gem_execbuffer2 *execbuf;
+ struct drm_i915_gem_exec_object2 *exec;
+ uint32_t handle;
Should this require gem_require_mappable_ggtt() first? The GTT mmap below won't be available everywhere.
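E.g. something like this at the top of mmapped() (just a sketch, untested), so the subtest skips cleanly where there is no mappable aperture:

	/* gem_mmap__gtt() needs a mappable aperture; skip otherwise */
	gem_require_mappable_ggtt(i915);

	handle = gem_create(i915, 4096);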
Antonio
+
+ handle = gem_create(i915, 4096);
+
+ gem_set_domain(i915, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ execbuf = gem_mmap__gtt(i915, handle, 4096, PROT_WRITE);
+ exec = (struct drm_i915_gem_exec_object2 *)(execbuf + 1);
+ gem_close(i915, handle);
+
+ exec->handle = batch_create(i915);
+
+ execbuf->buffers_ptr = to_user_pointer(exec);
+ execbuf->buffer_count = 1;
+
+ gem_execbuf(i915, execbuf);
+ gem_close(i915, exec->handle);
+
+ munmap(execbuf, 4096);
+}
+
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 gem_exec[1];
uint32_t batch[2] = {MI_BATCH_BUFFER_END};
@@ -256,6 +311,12 @@ igt_main
}
}
+ igt_subtest("readonly")
+ readonly(fd);
+
+ igt_subtest("mmapped")
+ mmapped(fd);
+
#define RUN_FAIL(expected_errno) do { \
igt_assert_eq(__gem_execbuf(fd, &execbuf), -expected_errno); \
} while(0)