If we do find ourselves with an idle barrier inside our active while
waiting, attempt to flush it by emitting a pulse using the kernel
context.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_active.c           | 21 ++++++++-
 drivers/gpu/drm/i915/selftests/i915_active.c | 46 ++++++++++++++++++++
 2 files changed, 65 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 07d39f22a2c3..4c190a548ba7 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -6,6 +6,7 @@
 
 #include <linux/debugobjects.h>
 
+#include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_engine_pm.h"
 
 #include "i915_drv.h"
@@ -435,6 +436,21 @@ static void enable_signaling(struct i915_active_fence *active)
 	dma_fence_put(fence);
 }
 
+static int flush_barrier(struct active_node *it)
+{
+	struct intel_engine_cs *engine;
+
+	if (!is_barrier(&it->base))
+		return 0;
+
+	engine = __barrier_to_engine(it);
+	smp_rmb(); /* serialise with add_active_barriers */
+	if (!is_barrier(&it->base))
+		return 0;
+
+	return intel_engine_flush_barriers(engine);
+}
+
 int i915_active_wait(struct i915_active *ref)
 {
 	struct active_node *it, *n;
@@ -448,8 +464,9 @@ int i915_active_wait(struct i915_active *ref)
 	/* Flush lazy signals */
 	enable_signaling(&ref->excl);
 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-		if (is_barrier(&it->base)) /* unconnected idle barrier */
-			continue;
+		err = flush_barrier(it); /* unconnected idle barrier? */
+		if (err)
+			break;
 
 		enable_signaling(&it->base);
 	}
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 96513a7d4739..aca107cb6176 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -193,11 +193,57 @@ static int live_active_retire(void *arg)
 	return err;
 }
 
+static int live_active_barrier(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct live_active *active;
+	int err = 0;
+
+	/* Check that we get a callback when requests retire upon waiting */
+
+	active = __live_alloc(i915);
+	if (!active)
+		return -ENOMEM;
+
+	err = i915_active_acquire(&active->base);
+	if (err)
+		goto out;
+
+	for_each_uabi_engine(engine, i915) {
+		err = i915_active_acquire_preallocate_barrier(&active->base,
+							      engine);
+		if (err)
+			break;
+
+		i915_active_acquire_barrier(&active->base);
+	}
+
+	i915_active_release(&active->base);
+
+	if (err == 0)
+		err = i915_active_wait(&active->base);
+
+	if (err == 0 && !READ_ONCE(active->retired)) {
+		pr_err("i915_active not retired after flushing barriers!\n");
+		err = -EINVAL;
+	}
+
+out:
+	__live_put(active);
+
+	if (igt_flush_test(i915))
+		err = -EIO;
+
+	return err;
+}
+
 int i915_active_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_active_wait),
 		SUBTEST(live_active_retire),
+		SUBTEST(live_active_barrier),
 	};
 
 	if (intel_gt_is_wedged(&i915->gt))
-- 
2.24.0.rc0
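
For reference, the "pulse using the kernel context" works by attaching
the engine's pending idle barriers to a fresh request on the kernel
context, so they are signalled when that request retires. A minimal
sketch of the idea behind intel_engine_flush_barriers() (simplified
from gt/intel_engine_heartbeat.c; the wakeref/serial bookkeeping is
elided, so treat this as illustrative rather than the exact
implementation):

	/*
	 * Sketch only: flush an engine's pending idle barriers by
	 * riding them on a new request on the kernel context.
	 * Assumes the usual i915 headers and helpers are in scope.
	 */
	static int flush_barriers_sketch(struct intel_engine_cs *engine)
	{
		struct i915_request *rq;

		if (llist_empty(&engine->barrier_tasks))
			return 0; /* nothing to flush */

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		/* The barriers signal once this pulse retires */
		i915_request_add_active_barriers(rq);
		i915_request_add(rq);

		return 0;
	}

Note that flush_barrier() samples is_barrier() twice around smp_rmb():
the first check skips the read barrier for ordinary nodes, and the
recheck, ordered against add_active_barriers(), confirms the node is
still an unconnected idle barrier before paying for a pulse.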