Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> writes: > If the idle_pulse fails to flush the i915_active, dump the tree to see > if that has any clues. > > Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> > --- > .../drm/i915/gt/selftest_engine_heartbeat.c | 4 ++ > drivers/gpu/drm/i915/i915_active.h | 2 + > drivers/gpu/drm/i915/selftests/i915_active.c | 45 +++++++++++++++++++ > 3 files changed, 51 insertions(+) > > diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c > index 155c508024df..131c49ddf33f 100644 > --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c > +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c > @@ -100,8 +100,12 @@ static int __live_idle_pulse(struct intel_engine_cs *engine, > pulse_unlock_wait(p); /* synchronize with the retirement callback */ > > if (!i915_active_is_idle(&p->active)) { > + struct drm_printer m = drm_err_printer("pulse"); > + > pr_err("%s: heartbeat pulse did not flush idle tasks\n", > engine->name); > + i915_active_print(&p->active, &m); > + > err = -EINVAL; > goto out; > } > diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h > index 4f52fe6146d2..44859356ce97 100644 > --- a/drivers/gpu/drm/i915/i915_active.h > +++ b/drivers/gpu/drm/i915/i915_active.h > @@ -214,4 +214,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, > void i915_active_acquire_barrier(struct i915_active *ref); > void i915_request_add_active_barriers(struct i915_request *rq); > > +void i915_active_print(struct i915_active *ref, struct drm_printer *m); > + > #endif /* _I915_ACTIVE_H_ */ > diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c > index 96513a7d4739..260b0ee5d1e3 100644 > --- a/drivers/gpu/drm/i915/selftests/i915_active.c > +++ b/drivers/gpu/drm/i915/selftests/i915_active.c > @@ -205,3 +205,48 @@ int i915_active_live_selftests(struct drm_i915_private *i915) > > 
return i915_subtests(tests, i915); > } > + > +static struct intel_engine_cs *node_to_barrier(struct active_node *it) > +{ > + struct intel_engine_cs *engine; > + > + if (!is_barrier(&it->base)) > + return NULL; > + > + engine = __barrier_to_engine(it); > + smp_rmb(); /* serialise with add_active_barriers */ I did find the pair. Builds confidence. > + if (!is_barrier(&it->base)) > + return NULL; > + > + return engine; > +} > + > +void i915_active_print(struct i915_active *ref, struct drm_printer *m) > +{ > + drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire); > + drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count)); > + drm_printf(m, "\tpreallocated barriers? %s\n", > + yesno(!llist_empty(&ref->preallocated_barriers))); > + > + if (i915_active_acquire_if_busy(ref)) { > + struct active_node *it, *n; > + > + rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { > + struct intel_engine_cs *engine; > + Does the acquire of ref keep the other leafs alive? We seem to be safe on iteration but the poking about the fence set and timeline below is a question mark. -Mika > + engine = node_to_barrier(it); > + if (engine) { > + drm_printf(m, "\tbarrier: %s\n", engine->name); > + continue; > + } > + > + if (i915_active_fence_isset(&it->base)) { > + drm_printf(m, > + "\ttimeline: %llx\n", it->timeline); > + continue; > + } > + } > + > + i915_active_release(ref); > + } > +} > -- > 2.24.0.rc1 > > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@xxxxxxxxxxxxxxxxxxxxx > https://lists.freedesktop.org/mailman/listinfo/intel-gfx _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx