Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> writes:

> We need to prevent resubmission of the context immediately following an
> initial resubmit (which does a lite-restore preemption). Currently we do
> this by disabling all submission whilst the context is still active, but
> we can improve this by limiting the restriction to only until we
> receive notification from the context-switch interrupt that the
> lite-restore preemption is complete.
>
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>

With some handholding on IRC, I can now connect the past, the code and
the commit description, and it holds true. I also tested it lightly on
KBL.

Reviewed-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxx>

> ---
>  drivers/gpu/drm/i915/i915_debugfs.c     | 18 ++++++++++++------
>  drivers/gpu/drm/i915/intel_lrc.c        |  7 +++----
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  1 -
>  3 files changed, 15 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index fa69d72fdcb9..9d7a77ecec3d 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -3320,15 +3320,21 @@ static int i915_engine_info(struct seq_file *m, void *unused)
>
>  		rcu_read_lock();
>  		rq = READ_ONCE(engine->execlist_port[0].request);
> -		if (rq)
> -			print_request(m, rq, "\t\tELSP[0] ");
> -		else
> +		if (rq) {
> +			seq_printf(m, "\t\tELSP[0] count=%d, ",
> +				   engine->execlist_port[0].count);
> +			print_request(m, rq, "rq: ");
> +		} else {
>  			seq_printf(m, "\t\tELSP[0] idle\n");
> +		}
>  		rq = READ_ONCE(engine->execlist_port[1].request);
> -		if (rq)
> -			print_request(m, rq, "\t\tELSP[1] ");
> -		else
> +		if (rq) {
> +			seq_printf(m, "\t\tELSP[1] count=%d, ",
> +				   engine->execlist_port[1].count);
> +			print_request(m, rq, "rq: ");
> +		} else {
>  			seq_printf(m, "\t\tELSP[1] idle\n");
> +		}
>  		rcu_read_unlock();
>
>  		spin_lock_irq(&engine->timeline->lock);
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index eceffe25c022..873c3a8a580b 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -388,7 +388,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
>  	execlists_context_status_change(port[0].request,
>  					INTEL_CONTEXT_SCHEDULE_IN);
>  	desc[0] = execlists_update_context(port[0].request);
> -	engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */
> +	port[0].count++;
>
>  	if (port[1].request) {
>  		GEM_BUG_ON(port[1].count);
> @@ -558,7 +558,8 @@ static bool execlists_elsp_ready(struct intel_engine_cs *engine)
>  	int port;
>
>  	port = 1; /* wait for a free slot */
> -	if (engine->disable_lite_restore_wa || engine->preempt_wa)
> +	if (engine->disable_lite_restore_wa ||
> +	    engine->execlist_port[0].count > 1)
>  		port = 0; /* wait for GPU to be idle before continuing */
>
>  	return !engine->execlist_port[port].request;
> @@ -609,8 +610,6 @@ static void intel_lrc_irq_handler(unsigned long data)
>  			i915_gem_request_put(port[0].request);
>  			port[0] = port[1];
>  			memset(&port[1], 0, sizeof(port[1]));
> -
> -			engine->preempt_wa = false;
>  		}
>
>  		GEM_BUG_ON(port[0].count == 0 &&
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 79c2b8d72322..34cdbb6350a8 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -381,7 +381,6 @@ struct intel_engine_cs {
>  	struct rb_node *execlist_first;
>  	unsigned int fw_domains;
>  	bool disable_lite_restore_wa;
> -	bool preempt_wa;
>  	u32 ctx_desc_template;
>
>  	/* Contexts are pinned whilst they are active on the GPU. The last
> --
> 2.11.0
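
For anyone following the gating change without the full files in front of
them, here is a minimal standalone sketch of the rule the patch ends up
with: only an outstanding lite-restore (port[0].count > 1) forces us to
wait for the whole port to drain, and once the context-switch interrupt
brings the count back down the restriction is lifted. The struct and
function names below are made up for illustration only and are not the
i915 code.

/*
 * Minimal sketch, not driver code: models how port[0].count gates
 * further ELSP submission after this patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct port {
	int count;	/* how many times the request was written to ELSP */
	bool request;	/* does this port carry a request at all? */
};

/* Only a pending lite-restore (count > 1) makes us wait for the port
 * to go completely idle before submitting again. */
static bool can_submit_more(const struct port *elsp)
{
	int idx = 1;			/* normally: just need a free second slot */

	if (elsp[0].count > 1)		/* lite-restore still in flight */
		idx = 0;		/* wait for the GPU to be idle */

	return !elsp[idx].request;
}

int main(void)
{
	struct port elsp[2] = { { .count = 1, .request = true }, { 0 } };

	printf("one request in flight: %d\n", can_submit_more(elsp)); /* 1 */

	elsp[0].count = 2;		/* lite-restore resubmission issued */
	printf("during lite-restore:   %d\n", can_submit_more(elsp)); /* 0 */

	elsp[0].count = 1;		/* context-switch interrupt seen */
	printf("after the interrupt:   %d\n", can_submit_more(elsp)); /* 1 */
	return 0;
}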