On unwinding the active request we give it a small (limited to internal
priority levels) boost to prevent it from being gazumped a second time.
However, this means that it can be promoted above the request that
triggered the preemption, causing a preempt-to-idle cycle for no change.
We can avoid this if we take the boost into account when checking if the
preemption request is valid.

v2: After preemption, the active request will be placed after the
preemptee if they end up with equal priority.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/intel_lrc.c | 39 ++++++++++++++++++++++++++++----
 1 file changed, 35 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d9d744f6ab2c..65997ed055b2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -163,6 +163,8 @@
 #define WA_TAIL_DWORDS 2
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
+#define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT)
+
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine,
 					    struct intel_context *ce);
@@ -181,13 +183,41 @@ static inline int rq_prio(const struct i915_request *rq)
 	return rq->sched.attr.priority;
 }
 
+static inline int active_prio(const struct i915_request *rq)
+{
+	int prio = rq_prio(rq);
+
+	/*
+	 * On unwinding the active request, we give it a priority bump
+	 * equivalent to a freshly submitted request. This protects it from
+	 * being gazumped again, but it would be preferable if we didn't
+	 * let it be gazumped in the first place!
+	 *
+	 * See __unwind_incomplete_requests()
+	 */
+	if ((prio & ACTIVE_PRIORITY) != ACTIVE_PRIORITY &&
+	    i915_request_started(rq)) {
+		/*
+		 * After preemption, we insert the active request at the
+		 * end of the new priority level. This means that we will be
+		 * _lower_ priority than the preemptee all things equal (and
+		 * so the preemption is valid), so adjust our comparison
+		 * accordingly.
+		 */
+		prio |= ACTIVE_PRIORITY;
+		prio--;
+	}
+
+	return prio;
+}
+
 static inline bool need_preempt(const struct intel_engine_cs *engine,
 				const struct i915_request *rq,
 				int q_prio)
 {
 	const struct intel_context *ctx = rq->hw_context;
-	const int last_prio = rq_prio(rq);
 	struct rb_node *rb;
+	int last_prio;
 	int idx;
 
 	if (!intel_engine_has_preemption(engine))
@@ -196,6 +226,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
 	if (i915_request_completed(rq))
 		return false;
 
+	last_prio = active_prio(rq);
 	if (!__execlists_need_preempt(q_prio, last_prio))
 		return false;
 
@@ -320,7 +351,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq, *rn, *active = NULL;
 	struct list_head *uninitialized_var(pl);
-	int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
+	int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY;
 
 	lockdep_assert_held(&engine->timeline.lock);
 
@@ -352,8 +383,8 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	 * stream, so give it the equivalent small priority bump to prevent
 	 * it being gazumped a second time by another peer.
 	 */
-	if (!(prio & I915_PRIORITY_NEWCLIENT)) {
-		prio |= I915_PRIORITY_NEWCLIENT;
+	if ((prio & ACTIVE_PRIORITY) != ACTIVE_PRIORITY) {
+		prio |= ACTIVE_PRIORITY;
 		active->sched.attr.priority = prio;
 		list_move_tail(&active->sched.link,
 			       i915_sched_lookup_priolist(engine, prio));
-- 
2.20.1
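
For illustration, the comparison change can be exercised with a small
standalone program. This is only a sketch, not i915 code: ACTIVE_PRIORITY
is modelled here as bit 1 of the priority word, the queued request is
assumed to carry only a smaller internal boost (bit 0, in the spirit of
I915_PRIORITY_WAIT), and a plain greater-than stands in for
__execlists_need_preempt(). With the raw priority the preemption fires even
though the unwound request would immediately be promoted back above its
rival; folding in the pending boost suppresses that cycle.

/*
 * Standalone sketch, not i915 code: shows why active_prio() ORs in the
 * boost and then subtracts one before the preemption comparison. The
 * values below are simplified stand-ins for the real priority encoding.
 */
#include <stdio.h>

#define ACTIVE_PRIORITY 0x2	/* stand-in for I915_PRIORITY_NEWCLIENT */

/*
 * Priority the running request will hold once it is unwound and boosted,
 * minus one because, at equal priority, it is requeued behind the request
 * that triggered the preemption.
 */
static int active_prio(int prio)
{
	if ((prio & ACTIVE_PRIORITY) != ACTIVE_PRIORITY) {
		prio |= ACTIVE_PRIORITY;
		prio--;
	}
	return prio;
}

int main(void)
{
	int rq_prio = 0x0;	/* running request, no internal boost yet */
	int q_prio = 0x1;	/* queued request with a smaller internal boost */

	/*
	 * Old check: preempt fires, only for the unwound request to be
	 * promoted above the request that triggered the preemption and
	 * win the next dequeue anyway.
	 */
	printf("raw:      preempt=%d\n", q_prio > rq_prio);

	/* New check: the redundant preempt-to-idle cycle is suppressed. */
	printf("adjusted: preempt=%d\n", q_prio > active_prio(rq_prio));

	return 0;
}

Built with any C compiler, this prints preempt=1 for the raw comparison and
preempt=0 for the adjusted one.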