Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> writes:

> If we stop filling the ELSP due to an incompatible virtual engine
> request, check if we should enable the timeslice on behalf of the queue.
>
> This fixes the case where we are inspecting the last->next element when
> we know that the last element is the last request in the execution queue,
> and so decided we did not need to enable timeslicing despite the intent
> to do so!
>
> Fixes: 8ee36e048c98 ("drm/i915/execlists: Minimalistic timeslicing")
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> Cc: Mika Kuoppala <mika.kuoppala@xxxxxxxxxxxxxxx>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
> Cc: <stable@xxxxxxxxxxxxxxx> # v5.4+

Reviewed-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxxxxxxxx>

> ---
>  drivers/gpu/drm/i915/gt/intel_lrc.c | 29 ++++++++++++++++++-----------
>  1 file changed, 18 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 13941d1c0a4a..a1d268880cfe 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -1757,11 +1757,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
>  	if (!intel_engine_has_timeslices(engine))
>  		return false;
>  
> -	if (list_is_last(&rq->sched.link, &engine->active.requests))
> -		return false;
> -
> -	hint = max(rq_prio(list_next_entry(rq, sched.link)),
> -		   engine->execlists.queue_priority_hint);
> +	hint = engine->execlists.queue_priority_hint;
> +	if (!list_is_last(&rq->sched.link, &engine->active.requests))
> +		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
>  
>  	return hint >= effective_prio(rq);
>  }
> @@ -1803,6 +1801,18 @@ static void set_timeslice(struct intel_engine_cs *engine)
>  	set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
>  }
>  
> +static void start_timeslice(struct intel_engine_cs *engine)
> +{
> +	struct intel_engine_execlists *execlists = &engine->execlists;
> +
> +	execlists->switch_priority_hint = execlists->queue_priority_hint;
> +
> +	if (timer_pending(&execlists->timer))
> +		return;
> +
> +	set_timer_ms(&execlists->timer, timeslice(engine));
> +}
> +
>  static void record_preemption(struct intel_engine_execlists *execlists)
>  {
>  	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
> @@ -1966,11 +1976,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>  				 * Even if ELSP[1] is occupied and not worthy
>  				 * of timeslices, our queue might be.
>  				 */
> -				if (!execlists->timer.expires &&
> -				    need_timeslice(engine, last))
> -					set_timer_ms(&execlists->timer,
> -						     timeslice(engine));
> -
> +				start_timeslice(engine);
>  				return;
>  			}
>  		}
> @@ -2005,7 +2011,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>  
>  		if (last && !can_merge_rq(last, rq)) {
>  			spin_unlock(&ve->base.active.lock);
> -			return; /* leave this for another */
> +			start_timeslice(engine);
> +			return; /* leave this for another sibling */
>  		}
>  
>  		ENGINE_TRACE(engine,
> -- 
> 2.25.1
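
For readers following along, a minimal standalone sketch of the before/after
ordering in need_timeslice() may help. This is not the driver code: the types
and names below (struct fake_request, need_timeslice_old/new, MAX) are made-up
stand-ins for effective_prio(), list_is_last() and queue_priority_hint, chosen
only so the two orderings can be compiled and compared in userspace.

/*
 * Standalone sketch, not kernel code: a simplified model of the
 * need_timeslice() ordering fix, using made-up types so it compiles
 * and runs on its own.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct fake_request {
	int prio;		/* stands in for effective_prio(rq) */
	bool is_last_active;	/* stands in for list_is_last(...) */
	int next_prio;		/* prio of the next active request, if any */
};

/* Old ordering: returns before queue_priority_hint is ever consulted. */
static bool need_timeslice_old(int queue_priority_hint,
			       const struct fake_request *rq)
{
	if (rq->is_last_active)
		return false;

	return MAX(rq->next_prio, queue_priority_hint) >= rq->prio;
}

/* New ordering: start from the hint, then fold in the next request. */
static bool need_timeslice_new(int queue_priority_hint,
			       const struct fake_request *rq)
{
	int hint = queue_priority_hint;

	if (!rq->is_last_active)
		hint = MAX(hint, rq->next_prio);

	return hint >= rq->prio;
}

int main(void)
{
	/* Lone request on the engine, equal-priority work waiting in the queue. */
	const struct fake_request rq = { .prio = 0, .is_last_active = true };
	const int queue_priority_hint = 0;

	printf("old: %d, new: %d\n",
	       need_timeslice_old(queue_priority_hint, &rq),
	       need_timeslice_new(queue_priority_hint, &rq));

	/* Prints "old: 0, new: 1": only the new ordering arms the timer. */
	return 0;
}

In other words, when the request being inspected is the only one on
engine->active.requests, the old ordering bailed out before ever looking at
the queue, which is exactly the case the patch describes.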