Check that we can run a second request even if an equal-priority spinner
is hogging the engine. Extend the testing with some undying timeslice
behaviour that requires hangcheck to intervene.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Lionel Landwerlin <lionel.g.landwerlin@xxxxxxxxx>
Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
---
 tests/i915/gem_exec_schedule.c | 109 +++++++++++++++++++++++++++++++++
 1 file changed, 109 insertions(+)

diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index ddcb1f21a..461e8de09 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -1060,6 +1060,107 @@ static void preempt_queue(int fd, unsigned ring, unsigned int flags)
 	}
 }
 
+static void preempt_timeslice(int i915, unsigned ring)
+{
+	const uint32_t bbe = MI_BATCH_BUFFER_END;
+	struct drm_i915_gem_exec_object2 obj = {
+		.handle = gem_create(i915, 4096)
+	};
+	struct drm_i915_gem_execbuffer2 execbuf = {
+		.buffers_ptr = to_user_pointer(&obj),
+		.buffer_count = 1,
+		.flags = ring,
+		.rsvd1 = gem_context_create(i915),
+	};
+	igt_spin_t *spin;
+
+	/*
+	 * Launch a spinner to occupy the target engine, and then
+	 * check that we execute a ping underneath it from a second context.
+	 */
+	spin = igt_spin_new(i915, .engine = ring, .flags = IGT_SPIN_POLL_RUN);
+	igt_spin_busywait_until_started(spin);
+
+	/* Both the active spinner and this ping are at the same priority */
+	gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
+	gem_execbuf(i915, &execbuf);
+	gem_sync(i915, obj.handle);
+
+	igt_assert(gem_bo_busy(i915, spin->handle));
+	igt_spin_free(i915, spin);
+
+	gem_context_destroy(i915, execbuf.rsvd1);
+	gem_close(i915, obj.handle);
+}
+
+static void preempt_timeslice_undying(int i915, unsigned ring)
+{
+	struct drm_i915_gem_exec_object2 obj = {
+		.handle = gem_create(i915, 4096)
+	};
+	struct drm_i915_gem_execbuffer2 execbuf = {
+		.buffers_ptr = to_user_pointer(&obj),
+		.buffer_count = 1,
+		.flags = ring,
+		.rsvd1 = gem_context_create(i915),
+	};
+	igt_spin_t *spin;
+
+	/*
+	 * We should not allow a spinner to evade hangcheck by simply
+	 * being timesliced.
+	 */
+	spin = igt_spin_new(i915, .engine = ring, .flags = IGT_SPIN_POLL_RUN);
+	igt_spin_busywait_until_started(spin);
+
+	for (int i = 0; i < 120; i++) {
+		const uint32_t bbe = MI_BATCH_BUFFER_END;
+
+		if (!gem_bo_busy(i915, spin->handle))
+			break;
+
+		gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
+		gem_execbuf(i915, &execbuf);
+
+		usleep(500 * 1000); /* 0.5s */
+	}
+
+	igt_assert(!gem_bo_busy(i915, spin->handle));
+	igt_spin_free(i915, spin);
+
+	gem_context_destroy(i915, execbuf.rsvd1);
+	gem_close(i915, obj.handle);
+}
+
+static void preempt_antitimeslice(int i915, unsigned ring)
+{
+	uint32_t ctx[2] = { gem_context_create(i915), gem_context_create(i915) };
+	igt_spin_t *spin[2];
+
+	/*
+	 * Launch two independent spinners to occupy an engine. Timeslicing
+	 * should not allow them to bypass hangcheck and run indefinitely.
+	 */
+	for (int i = 0; i < ARRAY_SIZE(spin); i++)
+		spin[i] = igt_spin_new(i915, ctx[i],
+				       .engine = ring,
+				       .flags = IGT_SPIN_FENCE_OUT);
+
+	/* Hangcheck should kill each spinner after about 10s */
+	for (int i = 0; i < ARRAY_SIZE(spin); i++) {
+		int64_t timeout = 60ull * NSEC_PER_SEC;
+		igt_assert_eq(gem_wait(i915, spin[i]->handle, &timeout), 0);
+	}
+
+	for (int i = 0; i < ARRAY_SIZE(spin); i++) {
+		igt_assert(!gem_bo_busy(i915, spin[i]->handle));
+		igt_assert_eq(sync_fence_status(spin[i]->out_fence), -EIO);
+
+		igt_spin_free(i915, spin[i]);
+		gem_context_destroy(i915, ctx[i]);
+	}
+}
+
 static void preempt_self(int fd, unsigned ring)
 {
 	uint32_t result = gem_create(fd, 4096);
@@ -1773,6 +1874,8 @@ igt_main
 			igt_subtest_f("preempt-queue-contexts-chain-%s", e->name)
 				preempt_queue(fd, e->exec_id | e->flags, CONTEXTS | CHAIN);
+			igt_subtest_f("preempt-timeslice-%s", e->name)
+				preempt_timeslice(fd, e->exec_id | e->flags);
 
 			igt_subtest_group {
 				igt_hang_t hang;
 
@@ -1788,6 +1891,12 @@ igt_main
 				igt_subtest_f("preemptive-hang-%s", e->name)
 					preemptive_hang(fd, e->exec_id | e->flags);
 
+				igt_subtest_f("preempt-timeslice-undying-%s", e->name)
+					preempt_timeslice_undying(fd, e->exec_id | e->flags);
+
+				igt_subtest_f("preempt-antitimeslice-%s", e->name)
+					preempt_antitimeslice(fd, e->exec_id | e->flags);
+
 				igt_fixture {
 					igt_disallow_hang(fd, hang);
 					igt_fork_hang_detector(fd);
-- 
2.23.0
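
As a usage note (not part of the diff): once the patch is applied, the new
subtests can be listed and run with the standard IGT options. The sketch
below assumes a meson build under ./build and the legacy "render" engine
name; the actual subtest suffixes come from the engine list on the target,
so check --list-subtests first.

    # List the new subtests, then run the basic same-priority timeslice check
    ./build/tests/gem_exec_schedule --list-subtests | grep preempt
    ./build/tests/gem_exec_schedule --run-subtest preempt-timeslice-render
    # The undying/antitimeslice variants wait for hangcheck (~10s per spinner),
    # so expect them to take noticeably longer than the basic check.
    ./build/tests/gem_exec_schedule --run-subtest preempt-antitimeslice-render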