Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=104676
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 lib/igt_gt.c    |  1 +
 tests/gem_eio.c | 77 +++++++++++++++++++++++++++++++--------------------------
 2 files changed, 43 insertions(+), 35 deletions(-)

diff --git a/lib/igt_gt.c b/lib/igt_gt.c
index ad6e62053..f70fcb925 100644
--- a/lib/igt_gt.c
+++ b/lib/igt_gt.c
@@ -402,6 +402,7 @@ void igt_force_gpu_reset(int drm_fd)
 	igt_sysfs_scanf(dir, "i915_wedged", "%d", &wedged);
 
 	close(dir);
+	errno = 0;
 
 	igt_assert(!wedged);
 }
diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index c941f3564..0b563bd0d 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -69,6 +69,8 @@ static void trigger_reset(int fd)
 	/* And just check the gpu is indeed running again */
 	igt_debug("Checking that the GPU recovered\n");
 	gem_test_engine(fd, -1);
+
+	gem_quiescent_gpu(fd);
 }
 
 static void wedge_gpu(int fd)
@@ -135,15 +137,17 @@ static void test_execbuf(int fd)
 
 static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
 {
-	struct drm_i915_gem_wait wait;
-	int err = 0;
+	struct drm_i915_gem_wait wait = {
+		.bo_handle = handle,
+		.timeout_ns = timeout,
+	};
+	int err;
 
-	memset(&wait, 0, sizeof(wait));
-	wait.bo_handle = handle;
-	wait.timeout_ns = timeout;
+	err = 0;
 	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
 		err = -errno;
 
+	errno = 0;
 	return err;
 }
 
@@ -184,11 +188,9 @@ static void test_suspend(int fd, int state)
 
 static void test_inflight(int fd)
 {
-	struct drm_i915_gem_execbuffer2 execbuf;
+	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	struct drm_i915_gem_exec_object2 obj[2];
-	uint32_t bbe = MI_BATCH_BUFFER_END;
 	unsigned int engine;
-	int fence[64]; /* conservative estimate of ring size */
 
 	igt_require(gem_has_exec_fence(fd));
 
@@ -198,13 +200,17 @@ static void test_inflight(int fd)
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
 
 	for_each_engine(fd, engine) {
-		igt_hang_t hang;
+		struct drm_i915_gem_execbuffer2 execbuf;
+		igt_spin_t *hang;
+		int fence[64]; /* conservative estimate of ring size */
+
+		gem_quiescent_gpu(fd);
 
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = igt_hang_ring(fd, engine);
-		obj[0].handle = hang.handle;
+		hang = igt_spin_batch_new(fd, 0, engine, 0);
+		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
 		execbuf.buffers_ptr = to_user_pointer(obj);
@@ -217,14 +223,13 @@ static void test_inflight(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_post_hang_ring(fd, hang);
-
 		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
 		}
 
+		igt_spin_batch_free(fd, hang);
 		igt_assert(i915_reset_control(true));
 		trigger_reset(fd);
 	}
@@ -236,7 +241,7 @@ static void test_inflight_suspend(int fd)
 	struct drm_i915_gem_exec_object2 obj[2];
 	uint32_t bbe = MI_BATCH_BUFFER_END;
 	int fence[64]; /* conservative estimate of ring size */
-	igt_hang_t hang;
+	igt_spin_t *hang;
 
 	igt_require(gem_has_exec_fence(fd));
 	igt_require(i915_reset_control(false));
@@ -246,8 +251,8 @@ static void test_inflight_suspend(int fd)
 	obj[1].handle = gem_create(fd, 4096);
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
 
-	hang = igt_hang_ring(fd, 0);
-	obj[0].handle = hang.handle;
+	hang = igt_spin_batch_new(fd, 0, 0, 0);
+	obj[0].handle = hang->handle;
 
 	memset(&execbuf, 0, sizeof(execbuf));
 	execbuf.buffers_ptr = to_user_pointer(obj);
@@ -263,14 +268,13 @@ static void test_inflight_suspend(int fd)
 	igt_set_autoresume_delay(30);
 	igt_system_suspend_autoresume(SUSPEND_STATE_MEM, SUSPEND_TEST_NONE);
 
-	igt_post_hang_ring(fd, hang);
-
 	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
 	for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 		igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 		close(fence[n]);
 	}
 
+	igt_spin_batch_free(fd, hang);
 	igt_assert(i915_reset_control(true));
 	trigger_reset(fd);
 }
@@ -288,12 +292,10 @@ static uint32_t __gem_context_create(int fd)
 
 static void test_inflight_contexts(int fd)
 {
-	struct drm_i915_gem_execbuffer2 execbuf;
 	struct drm_i915_gem_exec_object2 obj[2];
-	uint32_t bbe = MI_BATCH_BUFFER_END;
+	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	unsigned int engine;
 	uint32_t ctx[64];
-	int fence[64];
 
 	igt_require(gem_has_exec_fence(fd));
 
@@ -308,13 +310,17 @@ static void test_inflight_contexts(int fd)
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
 
 	for_each_engine(fd, engine) {
-		igt_hang_t hang;
+		struct drm_i915_gem_execbuffer2 execbuf;
+		igt_spin_t *hang;
+		int fence[64];
+
+		gem_quiescent_gpu(fd);
 
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = igt_hang_ring(fd, engine);
-		obj[0].handle = hang.handle;
+		hang = igt_spin_batch_new(fd, 0, engine, 0);
+		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
 		execbuf.buffers_ptr = to_user_pointer(obj);
@@ -328,14 +334,13 @@ static void test_inflight_contexts(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_post_hang_ring(fd, hang);
-
 		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
 		}
 
+		igt_spin_batch_free(fd, hang);
 		igt_assert(i915_reset_control(true));
 		trigger_reset(fd);
 	}
@@ -346,11 +351,11 @@ static void test_inflight_contexts(int fd)
 
 static void test_inflight_external(int fd)
 {
+	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	struct drm_i915_gem_execbuffer2 execbuf;
 	struct drm_i915_gem_exec_object2 obj;
-	uint32_t bbe = MI_BATCH_BUFFER_END;
-	igt_hang_t hang;
 	int timeline, fence;
+	igt_spin_t *hang;
 
 	igt_require_sw_sync();
 	igt_require(gem_has_exec_fence(fd));
@@ -359,7 +364,7 @@ static void test_inflight_external(int fd)
 	fence = sw_sync_timeline_create_fence(timeline, 1);
 
 	igt_require(i915_reset_control(false));
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
+	hang = igt_spin_batch_new(fd, 0, 0, 0);
 
 	memset(&obj, 0, sizeof(obj));
 	obj.handle = gem_create(fd, 4096);
@@ -377,13 +382,16 @@ static void test_inflight_external(int fd)
 	fence = execbuf.rsvd2 >> 32;
 	igt_assert(fence != -1);
 
-	igt_post_hang_ring(fd, hang); /* wedged, with an unready batch */
+	gem_sync(fd, hang->handle); /* wedged, with an unready batch */
+	igt_assert(!gem_bo_busy(fd, hang->handle));
+	igt_assert(gem_bo_busy(fd, obj.handle));
 	sw_sync_timeline_inc(timeline, 1); /* only now submit our batches */
 
 	igt_assert_eq(__gem_wait(fd, obj.handle, -1), 0);
 	igt_assert_eq(sync_fence_status(fence), -EIO);
 	close(fence);
 
+	igt_spin_batch_free(fd, hang);
 	igt_assert(i915_reset_control(true));
 	trigger_reset(fd);
 	close(timeline);
@@ -396,15 +404,15 @@ static void test_inflight_internal(int fd)
 	uint32_t bbe = MI_BATCH_BUFFER_END;
 	unsigned engine, nfence = 0;
 	int fences[16];
-	igt_hang_t hang;
+	igt_spin_t *hang;
 
 	igt_require(gem_has_exec_fence(fd));
 
 	igt_require(i915_reset_control(false));
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
+	hang = igt_spin_batch_new(fd, 0, 0, 0);
 
 	memset(obj, 0, sizeof(obj));
-	obj[0].handle = hang.handle;
+	obj[0].handle = hang->handle;
 	obj[0].flags = EXEC_OBJECT_WRITE;
 	obj[1].handle = gem_create(fd, 4096);
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
@@ -422,14 +430,13 @@ static void test_inflight_internal(int fd)
 		nfence++;
 	}
 
-	igt_post_hang_ring(fd, hang); /* wedged, with an unready batch */
-
 	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
 	while (nfence--) {
 		igt_assert_eq(sync_fence_status(fences[nfence]), -EIO);
 		close(fences[nfence]);
 	}
 
+	igt_spin_batch_free(fd, hang);
 	igt_assert(i915_reset_control(true));
 	trigger_reset(fd);
 }
-- 
2.16.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx