[PATCH igt 2/2] igt/gem_eio: Use slow spinners to inject hangs

One weird issue we see in bug 104676 is that the injected hangs
complete too quickly on HSW! So force the use of the slow spinners,
which do not try to trigger a hang by injecting random bytes into the
batch.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=104676
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
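(Note, not part of the patch: a minimal sketch of the spinner-based
hang injection that the diff below converts each subtest to. It
assumes igt.h pulls in the igt_spin_batch API used in this patch; the
helper name spin_to_wedge() is made up for illustration, and the real
subtests also queue fenced execbufs between starting and freeing the
spinner.)

    #include "igt.h"

    /* Hypothetical helper, for illustration only. */
    static void spin_to_wedge(int fd, unsigned int engine)
    {
    	igt_spin_t *hang;

    	/*
    	 * Old style: igt_hang_ring() corrupts a batch with random bytes
    	 * and relies on hang detection firing quickly enough. New style:
    	 * an infinite spinner keeps the engine busy until the GPU is
    	 * reset (or wedged), or until we terminate it ourselves.
    	 */
    	hang = __igt_spin_batch_new(fd, 0, engine, 0);

    	/* ... submit the requests we expect to be cancelled with -EIO ... */

    	/* Terminate the spinner and release its batch. */
    	igt_spin_batch_free(fd, hang);
    }
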
 lib/igt_gt.c    |  1 +
 tests/gem_eio.c | 84 +++++++++++++++++++++++++++++++++------------------------
 2 files changed, 50 insertions(+), 35 deletions(-)

diff --git a/lib/igt_gt.c b/lib/igt_gt.c
index ad6e62053..f70fcb925 100644
--- a/lib/igt_gt.c
+++ b/lib/igt_gt.c
@@ -402,6 +402,7 @@ void igt_force_gpu_reset(int drm_fd)
 	igt_sysfs_scanf(dir, "i915_wedged", "%d", &wedged);
 
 	close(dir);
+	errno = 0;
 
 	igt_assert(!wedged);
 }
diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index 16a89ecda..11776306f 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -67,6 +67,8 @@ static void trigger_reset(int fd)
 	/* And just check the gpu is indeed running again */
 	igt_debug("Checking that the GPU recovered\n");
 	gem_test_engine(fd, -1);
+
+	gem_quiescent_gpu(fd);
 }
 
 static void wedge_gpu(int fd)
@@ -133,15 +135,17 @@ static void test_execbuf(int fd)
 
 static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
 {
-	struct drm_i915_gem_wait wait;
-	int err = 0;
+	struct drm_i915_gem_wait wait = {
+		.bo_handle = handle,
+		.timeout_ns = timeout,
+	};
+	int err;
 
-	memset(&wait, 0, sizeof(wait));
-	wait.bo_handle = handle;
-	wait.timeout_ns = timeout;
+	err = 0;
 	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
 		err = -errno;
 
+	errno = 0;
 	return err;
 }
 
@@ -149,6 +153,8 @@ static void test_wait(int fd)
 {
 	igt_hang_t hang;
 
+	igt_require_gem(fd);
+
 	/* If the request we wait on completes due to a hang (even for
 	 * that request), the user expects the return value to 0 (success).
 	 */
@@ -182,12 +188,11 @@ static void test_suspend(int fd, int state)
 
 static void test_inflight(int fd)
 {
-	struct drm_i915_gem_execbuffer2 execbuf;
+	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	struct drm_i915_gem_exec_object2 obj[2];
-	uint32_t bbe = MI_BATCH_BUFFER_END;
 	unsigned int engine;
-	int fence[64]; /* conservative estimate of ring size */
 
+	igt_require_gem(fd);
 	igt_require(gem_has_exec_fence(fd));
 
 	memset(obj, 0, sizeof(obj));
@@ -196,13 +201,17 @@ static void test_inflight(int fd)
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
 
 	for_each_engine(fd, engine) {
-		igt_hang_t hang;
+		struct drm_i915_gem_execbuffer2 execbuf;
+		igt_spin_t *hang;
+		int fence[64]; /* conservative estimate of ring size */
+
+		gem_quiescent_gpu(fd);
 
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = igt_hang_ring(fd, engine);
-		obj[0].handle = hang.handle;
+		hang = __igt_spin_batch_new(fd, 0, engine, 0);
+		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
 		execbuf.buffers_ptr = to_user_pointer(obj);
@@ -215,14 +224,13 @@ static void test_inflight(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_post_hang_ring(fd, hang);
-
 		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
 		}
 
+		igt_spin_batch_free(fd, hang);
 		igt_assert(i915_reset_control(true));
 		trigger_reset(fd);
 	}
@@ -234,8 +242,9 @@ static void test_inflight_suspend(int fd)
 	struct drm_i915_gem_exec_object2 obj[2];
 	uint32_t bbe = MI_BATCH_BUFFER_END;
 	int fence[64]; /* conservative estimate of ring size */
-	igt_hang_t hang;
+	igt_spin_t *hang;
 
+	igt_require_gem(fd);
 	igt_require(gem_has_exec_fence(fd));
 	igt_require(i915_reset_control(false));
 
@@ -244,8 +253,8 @@ static void test_inflight_suspend(int fd)
 	obj[1].handle = gem_create(fd, 4096);
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
 
-	hang = igt_hang_ring(fd, 0);
-	obj[0].handle = hang.handle;
+	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	obj[0].handle = hang->handle;
 
 	memset(&execbuf, 0, sizeof(execbuf));
 	execbuf.buffers_ptr = to_user_pointer(obj);
@@ -261,27 +270,25 @@ static void test_inflight_suspend(int fd)
 	igt_set_autoresume_delay(30);
 	igt_system_suspend_autoresume(SUSPEND_STATE_MEM, SUSPEND_TEST_NONE);
 
-	igt_post_hang_ring(fd, hang);
-
 	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
 	for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 		igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 		close(fence[n]);
 	}
 
+	igt_spin_batch_free(fd, hang);
 	igt_assert(i915_reset_control(true));
 	trigger_reset(fd);
 }
 
 static void test_inflight_contexts(int fd)
 {
-	struct drm_i915_gem_execbuffer2 execbuf;
 	struct drm_i915_gem_exec_object2 obj[2];
-	uint32_t bbe = MI_BATCH_BUFFER_END;
+	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	unsigned int engine;
 	uint32_t ctx[64];
-	int fence[64];
 
+	igt_require_gem(fd);
 	igt_require(gem_has_exec_fence(fd));
 	gem_require_contexts(fd);
 
@@ -296,13 +303,17 @@ static void test_inflight_contexts(int fd)
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
 
 	for_each_engine(fd, engine) {
-		igt_hang_t hang;
+		struct drm_i915_gem_execbuffer2 execbuf;
+		igt_spin_t *hang;
+		int fence[64];
+
+		gem_quiescent_gpu(fd);
 
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = igt_hang_ring(fd, engine);
-		obj[0].handle = hang.handle;
+		hang = __igt_spin_batch_new(fd, 0, engine, 0);
+		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
 		execbuf.buffers_ptr = to_user_pointer(obj);
@@ -316,14 +327,13 @@ static void test_inflight_contexts(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_post_hang_ring(fd, hang);
-
 		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
 		}
 
+		igt_spin_batch_free(fd, hang);
 		igt_assert(i915_reset_control(true));
 		trigger_reset(fd);
 	}
@@ -334,12 +344,13 @@ static void test_inflight_contexts(int fd)
 
 static void test_inflight_external(int fd)
 {
+	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	struct drm_i915_gem_execbuffer2 execbuf;
 	struct drm_i915_gem_exec_object2 obj;
-	uint32_t bbe = MI_BATCH_BUFFER_END;
-	igt_hang_t hang;
 	int timeline, fence;
+	igt_spin_t *hang;
 
+	igt_require_gem(fd);
 	igt_require_sw_sync();
 	igt_require(gem_has_exec_fence(fd));
 
@@ -347,7 +358,7 @@ static void test_inflight_external(int fd)
 	fence = sw_sync_timeline_create_fence(timeline, 1);
 
 	igt_require(i915_reset_control(false));
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
+	hang = __igt_spin_batch_new(fd, 0, 0, 0);
 
 	memset(&obj, 0, sizeof(obj));
 	obj.handle = gem_create(fd, 4096);
@@ -365,13 +376,16 @@ static void test_inflight_external(int fd)
 	fence = execbuf.rsvd2 >> 32;
 	igt_assert(fence != -1);
 
-	igt_post_hang_ring(fd, hang); /* wedged, with an unready batch */
+	gem_sync(fd, hang->handle); /* wedged, with an unready batch */
+	igt_assert(!gem_bo_busy(fd, hang->handle));
+	igt_assert(gem_bo_busy(fd, obj.handle));
 	sw_sync_timeline_inc(timeline, 1); /* only now submit our batches */
 
 	igt_assert_eq(__gem_wait(fd, obj.handle, -1), 0);
 	igt_assert_eq(sync_fence_status(fence), -EIO);
 	close(fence);
 
+	igt_spin_batch_free(fd, hang);
 	igt_assert(i915_reset_control(true));
 	trigger_reset(fd);
 	close(timeline);
@@ -384,15 +398,16 @@ static void test_inflight_internal(int fd)
 	uint32_t bbe = MI_BATCH_BUFFER_END;
 	unsigned engine, nfence = 0;
 	int fences[16];
-	igt_hang_t hang;
+	igt_spin_t *hang;
 
+	igt_require_gem(fd);
 	igt_require(gem_has_exec_fence(fd));
 
 	igt_require(i915_reset_control(false));
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
+	hang = __igt_spin_batch_new(fd, 0, 0, 0);
 
 	memset(obj, 0, sizeof(obj));
-	obj[0].handle = hang.handle;
+	obj[0].handle = hang->handle;
 	obj[0].flags = EXEC_OBJECT_WRITE;
 	obj[1].handle = gem_create(fd, 4096);
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
@@ -410,14 +425,13 @@ static void test_inflight_internal(int fd)
 		nfence++;
 	}
 
-	igt_post_hang_ring(fd, hang); /* wedged, with an unready batch */
-
 	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
 	while (nfence--) {
 		igt_assert_eq(sync_fence_status(fences[nfence]), -EIO);
 		close(fences[nfence]);
 	}
 
+	igt_spin_batch_free(fd, hang);
 	igt_assert(i915_reset_control(true));
 	trigger_reset(fd);
 }
-- 
2.16.1
