On Mon, Sep 25, 2017 at 12:44:07PM +0100, Chris Wilson wrote:
> Add another perma-pinned context for using for preemption at any time.
> We cannot just reuse the existing kernel context, as first and foremost
> we need to ensure that we can preempt the kernel context itself, so
> require a distinct context id. Similar to the kernel context, we may
> want to interrupt execution and switch to the preempt context at any
> time, and so it needs to be permanently pinned and available.
>
> To compensate for yet another permanent allocation, we shrink the
> existing context and the new context by reducing their ringbuffer to the
> minimum.

Since we're special-casing preempt_context, we should probably also
BUG_ON any attempt to allocate a request for it (rough, untested sketch
of what I mean below the quoted patch).
Other than that - LGTM.

Reviewed-by: Michał Winiarski <michal.winiarski@xxxxxxxxx>

-Michał

>
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/i915_drv.h         |  3 ++-
>  drivers/gpu/drm/i915/i915_gem_context.c | 42 ++++++++++++++++++++++++++-------
>  drivers/gpu/drm/i915/intel_engine_cs.c  | 17 +++++++++++--
>  3 files changed, 51 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index c1e93a61d81b..ef5aad540a12 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2244,8 +2244,9 @@ struct drm_i915_private {
>  	wait_queue_head_t gmbus_wait_queue;
>  
>  	struct pci_dev *bridge_dev;
> -	struct i915_gem_context *kernel_context;
>  	struct intel_engine_cs *engine[I915_NUM_ENGINES];
> +	struct i915_gem_context *kernel_context;
> +	struct i915_gem_context *preempt_context;
>  	struct i915_vma *semaphore;
>  
>  	struct drm_dma_handle *status_page_dmah;
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 921ee369c74d..1518e110fd04 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -416,14 +416,29 @@ i915_gem_context_create_gvt(struct drm_device *dev)
>  	return ctx;
>  }
>  
> +static struct i915_gem_context *
> +create_kernel_context(struct drm_i915_private *i915, int prio)
> +{
> +	struct i915_gem_context *ctx;
> +
> +	ctx = i915_gem_create_context(i915, NULL);
> +	if (IS_ERR(ctx))
> +		return ctx;
> +
> +	i915_gem_context_clear_bannable(ctx);
> +	ctx->priority = prio;
> +	ctx->ring_size = PAGE_SIZE;
> +
> +	return ctx;
> +}
> +
>  int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
>  {
>  	struct i915_gem_context *ctx;
>  
>  	/* Init should only be called once per module load. Eventually the
>  	 * restriction on the context_disabled check can be loosened. */
> -	if (WARN_ON(dev_priv->kernel_context))
> -		return 0;
> +	GEM_BUG_ON(dev_priv->kernel_context);
>  
>  	INIT_LIST_HEAD(&dev_priv->contexts.list);
>  	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
> @@ -441,23 +456,30 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
>  	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
>  	ida_init(&dev_priv->contexts.hw_ida);
>  
> -	ctx = i915_gem_create_context(dev_priv, NULL);
> +	/* lowest priority; idle task */
> +	ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
>  	if (IS_ERR(ctx)) {
>  		DRM_ERROR("Failed to create default global context (error %ld)\n",
>  			  PTR_ERR(ctx));
>  		return PTR_ERR(ctx);
>  	}
>  
> -	/* For easy recognisablity, we want the kernel context to be 0 and then
> +	/*
> +	 * For easy recognisablity, we want the kernel context to be 0 and then
>  	 * all user contexts will have non-zero hw_id.
>  	 */
>  	GEM_BUG_ON(ctx->hw_id);
> -
> -	i915_gem_context_clear_bannable(ctx);
> -	ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
> +	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
>  	dev_priv->kernel_context = ctx;
>  
> -	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
> +	/* highest priority; preempting task */
> +	ctx = create_kernel_context(dev_priv, INT_MAX);
> +	if (IS_ERR(ctx)) {
> +		DRM_ERROR("Failed to create default preempt context (error %ld)\n",
> +			  PTR_ERR(ctx));
> +		return PTR_ERR(ctx);
> +	}
> +	dev_priv->preempt_context = ctx;
>  
>  	DRM_DEBUG_DRIVER("%s context support initialized\n",
>  			 dev_priv->engine[RCS]->context_size ? "logical" :
> @@ -517,6 +539,10 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
>  	context_close(ctx);
>  	i915_gem_context_free(ctx);
>  
> +	ctx = i915_gem_context_get(fetch_and_zero(&i915->preempt_context));
> +	context_close(ctx);
> +	i915_gem_context_free(ctx);
> +
>  	/* Must free all deferred contexts (via flush_workqueue) first */
>  	ida_destroy(&i915->contexts.hw_ida);
>  }
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index a28e2a864cf1..f4bb11bb3b57 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -613,9 +613,19 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
>  	if (IS_ERR(ring))
>  		return PTR_ERR(ring);
>  
> +	/*
> +	 * Similarly the preempt context must always be available so that
> +	 * we can interrupt the engine at any time.
> +	 */
> +	ring = engine->context_pin(engine, engine->i915->preempt_context);
> +	if (IS_ERR(ring)) {
> +		ret = PTR_ERR(ring);
> +		goto err_unpin_kernel;
> +	}
> +
>  	ret = intel_engine_init_breadcrumbs(engine);
>  	if (ret)
> -		goto err_unpin;
> +		goto err_unpin_preempt;
>  
>  	ret = i915_gem_render_state_init(engine);
>  	if (ret)
> @@ -634,7 +644,9 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
>  	i915_gem_render_state_fini(engine);
>  err_breadcrumbs:
>  	intel_engine_fini_breadcrumbs(engine);
> -err_unpin:
> +err_unpin_preempt:
> +	engine->context_unpin(engine, engine->i915->preempt_context);
> +err_unpin_kernel:
>  	engine->context_unpin(engine, engine->i915->kernel_context);
>  	return ret;
>  }
> @@ -660,6 +672,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
>  	intel_engine_cleanup_cmd_parser(engine);
>  	i915_gem_batch_pool_fini(&engine->batch_pool);
>  
> +	engine->context_unpin(engine, engine->i915->preempt_context);
>  	engine->context_unpin(engine, engine->i915->kernel_context);
>  }
>  
> -- 
> 2.14.1
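
Something along these lines is roughly what I had in mind for the
BUG_ON - completely untested, and I'm writing the
i915_gem_request_alloc() prototype from memory, so treat it as a sketch
of the idea rather than the exact change:

struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = engine->i915;

	/*
	 * The preempt context exists only so that we can inject the
	 * preemption sequence at any time; nobody should ever build an
	 * ordinary request on top of it, so catch any such attempt as
	 * early as possible.
	 */
	GEM_BUG_ON(ctx == i915->preempt_context);

	/* ... existing request construction continues as before ... */
}

Putting the check at the top of request allocation covers every caller
(execbuf, kernel requests, selftests) in one place, and being a
GEM_BUG_ON it compiles away without CONFIG_DRM_I915_DEBUG_GEM.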