On Mon, Mar 26, 2018 at 12:50:41PM +0100, Chris Wilson wrote:
> Install a timer when trying to preempt on behalf of an important
> context such that if the active context does not honour the preemption
> request within the desired timeout, then we reset the GPU to allow the
> important context to run.
>
> (Open: should not the timer be started from receiving the high priority
> request...)
>
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/intel_lrc.c        | 53 +++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  8 +++++
>  2 files changed, 61 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 50688fc889d9..6da816d23cb3 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -533,6 +533,47 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
>
>  	execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
>  	execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
> +
> +	/* Set a timer to force preemption vs hostile userspace */
> +	if (execlists->queue_preempt_timeout) {
> +		GEM_TRACE("%s timeout=%uns\n",
> +			  engine->name, execlists->queue_preempt_timeout);
> +		hrtimer_start(&execlists->preempt_timer,
> +			      ktime_set(0, execlists->queue_preempt_timeout),
> +			      HRTIMER_MODE_REL);
> +	}
> +}
> +
> +static enum hrtimer_restart preempt_timeout(struct hrtimer *hrtimer)
> +{
> +	struct intel_engine_execlists *execlists =
> +		container_of(hrtimer, typeof(*execlists), preempt_timer);
> +
> +	GEM_TRACE("%s\n",
> +		  container_of(execlists,
> +			       struct intel_engine_cs,
> +			       execlists)->name);
> +
> +	queue_work(system_highpri_wq, &execlists->preempt_reset);
> +	return HRTIMER_NORESTART;
> +}
> +
> +static void preempt_reset(struct work_struct *work)
> +{
> +	struct intel_engine_cs *engine =
> +		container_of(work, typeof(*engine), execlists.preempt_reset);
> +
> +	GEM_TRACE("%s\n", engine->name);
> +
> +	tasklet_disable(&engine->execlists.tasklet);
> +
> +	engine->execlists.tasklet.func(engine->execlists.tasklet.data);

This looks redundant with execlists_reset_prepare, which already does a
process_csb at a later point to decide whether to execute or abort the
reset. In your previous proposal you suggested doing a tasklet disable
and kill here.
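Something along these lines is roughly what I have in mind; an untested
sketch, assuming the engine reset kicked off by i915_handle_error() goes
through execlists_reset_prepare()/process_csb() and so makes the final
call on whether the reset is still needed:

static void preempt_reset(struct work_struct *work)
{
	struct intel_engine_cs *engine =
		container_of(work, typeof(*engine), execlists.preempt_reset);

	GEM_TRACE("%s\n", engine->name);

	/*
	 * No need to poke the submission tasklet by hand here. If the
	 * preemption still appears outstanding, hand the engine to the
	 * reset machinery and let execlists_reset_prepare/process_csb
	 * decide whether to go through with the reset or abort it.
	 */
	if (execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
		i915_handle_error(engine->i915, BIT(engine->id), 0,
				  "preemption timed out on %s", engine->name);
}

That would also let us drop the tasklet_disable/tasklet_enable pair from
the worker, assuming execlists_reset_prepare quiesces the tasklet on its
own.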
> +
> +	if (execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
> +		i915_handle_error(engine->i915, BIT(engine->id), 0,
> +				  "preemption timed out on %s", engine->name);
> +
> +	tasklet_enable(&engine->execlists.tasklet);
> }
>
> static void complete_preempt_context(struct intel_engine_execlists *execlists)
> @@ -542,6 +583,10 @@ static void complete_preempt_context(struct intel_engine_execlists *execlists)
>  	execlists_cancel_port_requests(execlists);
>  	execlists_unwind_incomplete_requests(execlists);
>
> +	/* If the timer already fired, complete the reset */
> +	if (hrtimer_try_to_cancel(&execlists->preempt_timer) < 0)
> +		return;
> +
>  	execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
> }
>
> @@ -708,6 +753,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>  		kmem_cache_free(engine->i915->priorities, p);
>  	}
> done:
> +	execlists->queue_preempt_timeout = 0; /* preemption point passed */
>  	execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
>  	execlists->first = rb;
>  	if (submit)
> @@ -864,6 +910,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
>
>  	/* Remaining _unready_ requests will be nop'ed when submitted */
>
> +	execlists->queue_preempt_timeout = 0;
>  	execlists->queue_priority = INT_MIN;
>  	execlists->queue = RB_ROOT;
>  	execlists->first = NULL;
> @@ -1080,6 +1127,7 @@ static void queue_request(struct intel_engine_cs *engine,
>  static void __submit_queue(struct intel_engine_cs *engine, int prio)
>  {
>  	engine->execlists.queue_priority = prio;
> +	engine->execlists.queue_preempt_timeout = 0;
>  	tasklet_hi_schedule(&engine->execlists.tasklet);
>  }
>
> @@ -2270,6 +2318,11 @@ logical_ring_setup(struct intel_engine_cs *engine)
>  	tasklet_init(&engine->execlists.tasklet,
>  		     execlists_submission_tasklet, (unsigned long)engine);
>
> +	INIT_WORK(&engine->execlists.preempt_reset, preempt_reset);
> +	hrtimer_init(&engine->execlists.preempt_timer,
> +		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
> +	engine->execlists.preempt_timer.function = preempt_timeout;
> +
>  	logical_ring_default_vfuncs(engine);
>  	logical_ring_default_irqs(engine);
>  }
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 4c71dcdc722b..7166f47c8489 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -284,6 +284,11 @@ struct intel_engine_execlists {
>  	 */
>  	int queue_priority;
>
> +	/**
> +	 * @queue_preempt_timeout: Timeout in ns before forcing preemption.
> +	 */
> +	unsigned int queue_preempt_timeout;
> +
>  	/**
>  	 * @queue: queue of requests, in priority lists
>  	 */
> @@ -313,6 +318,9 @@ struct intel_engine_execlists {
>  	 * @preempt_complete_status: expected CSB upon completing preemption
>  	 */
>  	u32 preempt_complete_status;
> +
> +	struct hrtimer preempt_timer;
> +	struct work_struct preempt_reset;
> };
>
>  #define INTEL_ENGINE_CS_MAX_NAME 8
> --
> 2.16.3
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx