Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> writes:

> In some of our hangtests, we try to reset an active engine while it is
> spinning inside the recursive spinner. However, we also try to flood the
> engine with requests that preempt the hang, and so should disable the
> preemption to be sure that we reset the right request.
>
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>

Reviewed-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxxxxxxxx>

> ---
>  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 36 ++++++++++++++------
>  1 file changed, 26 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> index 4aa4cc917d8b..035f363fb0f8 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> @@ -203,12 +203,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
>  		*batch++ = lower_32_bits(hws_address(hws, rq));
>  		*batch++ = upper_32_bits(hws_address(hws, rq));
>  		*batch++ = rq->fence.seqno;
> -		*batch++ = MI_ARB_CHECK;
> +		*batch++ = MI_NOOP;
>
>  		memset(batch, 0, 1024);
>  		batch += 1024 / sizeof(*batch);
>
> -		*batch++ = MI_ARB_CHECK;
> +		*batch++ = MI_NOOP;
>  		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
>  		*batch++ = lower_32_bits(vma->node.start);
>  		*batch++ = upper_32_bits(vma->node.start);
> @@ -217,12 +217,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
>  		*batch++ = 0;
>  		*batch++ = lower_32_bits(hws_address(hws, rq));
>  		*batch++ = rq->fence.seqno;
> -		*batch++ = MI_ARB_CHECK;
> +		*batch++ = MI_NOOP;
>
>  		memset(batch, 0, 1024);
>  		batch += 1024 / sizeof(*batch);
>
> -		*batch++ = MI_ARB_CHECK;
> +		*batch++ = MI_NOOP;
>  		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
>  		*batch++ = lower_32_bits(vma->node.start);
>  	} else if (INTEL_GEN(gt->i915) >= 4) {
> @@ -230,24 +230,24 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
>  		*batch++ = 0;
>  		*batch++ = lower_32_bits(hws_address(hws, rq));
>  		*batch++ = rq->fence.seqno;
> -		*batch++ = MI_ARB_CHECK;
> +		*batch++ = MI_NOOP;
>
>  		memset(batch, 0, 1024);
>  		batch += 1024 / sizeof(*batch);
>
> -		*batch++ = MI_ARB_CHECK;
> +		*batch++ = MI_NOOP;
>  		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
>  		*batch++ = lower_32_bits(vma->node.start);
>  	} else {
>  		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
>  		*batch++ = lower_32_bits(hws_address(hws, rq));
>  		*batch++ = rq->fence.seqno;
> -		*batch++ = MI_ARB_CHECK;
> +		*batch++ = MI_NOOP;
>
>  		memset(batch, 0, 1024);
>  		batch += 1024 / sizeof(*batch);
>
> -		*batch++ = MI_ARB_CHECK;
> +		*batch++ = MI_NOOP;
>  		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
>  		*batch++ = lower_32_bits(vma->node.start);
>  	}
> @@ -866,13 +866,29 @@ static int __igt_reset_engines(struct intel_gt *gt,
>  			count++;
>
>  			if (rq) {
> +				if (rq->fence.error != -EIO) {
> +					pr_err("i915_reset_engine(%s:%s):"
> +					       " failed to reset request %llx:%lld\n",
> +					       engine->name, test_name,
> +					       rq->fence.context,
> +					       rq->fence.seqno);
> +					i915_request_put(rq);
> +
> +					GEM_TRACE_DUMP();
> +					intel_gt_set_wedged(gt);
> +					err = -EIO;
> +					break;
> +				}
> +
>  				if (i915_request_wait(rq, 0, HZ / 5) < 0) {
>  					struct drm_printer p =
>  						drm_info_printer(gt->i915->drm.dev);
>
>  					pr_err("i915_reset_engine(%s:%s):"
> -					       " failed to complete request after reset\n",
> -					       engine->name, test_name);
> +					       " failed to complete request %llx:%lld after reset\n",
> +					       engine->name, test_name,
> +					       rq->fence.context,
> +					       rq->fence.seqno);
>  					intel_engine_dump(engine, &p,
>  							  "%s\n", engine->name);
>  					i915_request_put(rq);
> --
> 2.20.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
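
For anyone reading along: MI_ARB_CHECK is an arbitration point, i.e. the
only spot in the spinning batch where the command streamer is allowed to
switch to a preempting request, whereas MI_NOOP does nothing. A rough
annotated sketch of the gen8 path as it looks after this patch (same names
as in hang_create_request(); the comments are mine, not part of the patch):

	/*
	 * Write the breadcrumb so the CPU can tell the spinner has started,
	 * then loop back to the start of the batch forever. With MI_NOOP in
	 * place of MI_ARB_CHECK there is no arbitration point left, so a
	 * flood of higher-priority requests cannot preempt the hang and the
	 * engine reset is guaranteed to land on this request.
	 */
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;
	*batch++ = MI_NOOP; /* was MI_ARB_CHECK */

	memset(batch, 0, 1024);
	batch += 1024 / sizeof(*batch);

	*batch++ = MI_NOOP; /* was MI_ARB_CHECK */
	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; /* jump to batch start */
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);

With that in place the new rq->fence.error != -EIO check in
__igt_reset_engines() reads naturally to me: only the request actually
killed by the engine reset is marked with -EIO, so anything else means the
reset hit some other request and the test should wedge and bail out.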