This is so that we have symmetry with intel_lrc.c and avoid a source of
if (i915.enable_execlists) layering violation within i915_gem_context.c
- that is we move the specific handling of the dev_priv->kernel_context
for legacy submission into the legacy submission code. This depends upon
the init/fini ordering between contexts and engines already defined by
intel_lrc.c, and also exporting the context alignment required for
pinning the legacy context.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h         |  2 ++
 drivers/gpu/drm/i915/i915_gem_context.c | 27 +++------------------------
 drivers/gpu/drm/i915/intel_ringbuffer.c | 31 +++++++++++++++++++++++++++++++
 3 files changed, 36 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e4c8e341655c..19d0194c728f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -867,6 +867,8 @@ struct i915_gem_context {
 	u32 user_handle;
 #define CONTEXT_NO_ZEROMAP (1<<0)
 
+	u32 ggtt_alignment;
+
 	struct intel_context {
 		struct drm_i915_gem_object *state;
 		struct intel_ringbuffer *ringbuf;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a3b11aac23a4..c620fe6c9d96 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -268,6 +268,8 @@ __create_hw_context(struct drm_device *dev,
 	list_add_tail(&ctx->link, &dev_priv->context_list);
 	ctx->i915 = dev_priv;
 
+	ctx->ggtt_alignment = get_context_alignment(dev_priv);
+
 	if (dev_priv->hw_context_size) {
 		struct drm_i915_gem_object *obj =
 				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
@@ -413,26 +415,6 @@ int i915_gem_context_init(struct drm_device *dev)
 		return PTR_ERR(ctx);
 	}
 
-	if (!i915.enable_execlists && ctx->engine[RCS].state) {
-		int ret;
-
-		/* We may need to do things with the shrinker which
-		 * require us to immediately switch back to the default
-		 * context. This can cause a problem as pinning the
-		 * default context also requires GTT space which may not
-		 * be available. To avoid this we always pin the default
-		 * context.
-		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state,
-					    get_context_alignment(dev_priv), 0);
-		if (ret) {
-			DRM_ERROR("Failed to pinned default global context (error %d)\n",
-				  ret);
-			i915_gem_context_unreference(ctx);
-			return ret;
-		}
-	}
-
 	dev_priv->kernel_context = ctx;
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -469,9 +451,6 @@ void i915_gem_context_fini(struct drm_device *dev)
 
 	lockdep_assert_held(&dev->struct_mutex);
 
-	if (!i915.enable_execlists && dctx->engine[RCS].state)
-		i915_gem_object_ggtt_unpin(dctx->engine[RCS].state);
-
 	i915_gem_context_unreference(dctx);
 	dev_priv->kernel_context = NULL;
 }
@@ -721,7 +700,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 
 	/* Trying to pin first makes error handling easier. */
 	ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
-				    get_context_alignment(engine->i915),
+				    to->ggtt_alignment,
 				    0);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8d35a3978f9b..4e0aa7e9d5da 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2244,6 +2244,8 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 				  struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_gem_context *kctx = dev_priv->kernel_context;
+	struct intel_context *ce = &kctx->engine[engine->id];
 	struct intel_ringbuffer *ringbuf;
 	int ret;
 
@@ -2260,6 +2262,25 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	init_waitqueue_head(&engine->irq_queue);
 
+	if (ce->state) {
+		i915_gem_context_reference(kctx);
+
+		/* We may need to do things with the shrinker which
+		 * require us to immediately switch back to the default
+		 * context. This can cause a problem as pinning the
+		 * default context also requires GTT space which may not
+		 * be available. To avoid this we always pin the default
+		 * context.
+		 */
+		ret = i915_gem_obj_ggtt_pin(ce->state,
+					    kctx->ggtt_alignment,
+					    0);
+		if (ret)
+			goto error;
+
+		ce->initialised = false;
+	}
+
 	ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
 	if (IS_ERR(ringbuf)) {
 		ret = PTR_ERR(ringbuf);
@@ -2300,6 +2321,8 @@ error:
 void intel_cleanup_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv;
+	struct i915_gem_context *kctx;
+	struct intel_context *ce;
 
 	if (!intel_engine_initialized(engine))
 		return;
@@ -2327,6 +2350,14 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 
 	i915_cmd_parser_fini_ring(engine);
 	i915_gem_batch_pool_fini(&engine->batch_pool);
+
+	kctx = dev_priv->kernel_context;
+	ce = &kctx->engine[engine->id];
+	if (ce->state) {
+		i915_gem_object_ggtt_unpin(ce->state);
+		i915_gem_context_unreference(kctx);
+	}
+
 	engine->i915 = NULL;
 }
-- 
2.8.1
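
For readers without the tree handy, the following is a minimal, self-contained sketch of the lifecycle the patch establishes for the legacy (!i915.enable_execlists) path: pin the kernel context's per-engine state, and take a context reference, when the engine is initialised, then release both when the engine is torn down. All types and helpers below (gem_object, kernel_context, ggtt_pin() and friends) are hypothetical stand-ins rather than the real i915 structures.

/*
 * Sketch of the kernel-context pin/unpin lifecycle moved into the legacy
 * engine init/cleanup paths.  Everything here is a stand-in for the i915
 * structures, not the driver code itself.
 */
#include <stdbool.h>
#include <stdio.h>

struct gem_object {                     /* stands in for drm_i915_gem_object */
	int pin_count;
};

struct engine_context {                 /* stands in for intel_context */
	struct gem_object *state;
	bool initialised;
};

struct kernel_context {                 /* stands in for i915_gem_context */
	int refcount;
	unsigned long ggtt_alignment;   /* cached once at context creation */
	struct engine_context engine[4];
};

/* Stub helpers mirroring i915_gem_obj_ggtt_pin()/unpin() and context refs. */
static int ggtt_pin(struct gem_object *obj, unsigned long alignment)
{
	(void)alignment;                /* real code would honour the alignment */
	obj->pin_count++;
	return 0;
}

static void ggtt_unpin(struct gem_object *obj)
{
	obj->pin_count--;
}

static void context_reference(struct kernel_context *ctx)   { ctx->refcount++; }
static void context_unreference(struct kernel_context *ctx) { ctx->refcount--; }

/* Mirrors the block added to intel_init_ring_buffer(). */
static int legacy_engine_init(struct kernel_context *kctx, int id)
{
	struct engine_context *ce = &kctx->engine[id];
	int ret;

	if (!ce->state)
		return 0;               /* nothing to pin for this engine */

	context_reference(kctx);

	/* The kernel context stays pinned for the lifetime of the engine so
	 * that switching back to it (e.g. from the shrinker) never has to
	 * find GTT space at an awkward moment.
	 */
	ret = ggtt_pin(ce->state, kctx->ggtt_alignment);
	if (ret) {
		context_unreference(kctx);
		return ret;
	}

	ce->initialised = false;        /* force a full restore on first use */
	return 0;
}

/* Mirrors the block added to intel_cleanup_engine(). */
static void legacy_engine_fini(struct kernel_context *kctx, int id)
{
	struct engine_context *ce = &kctx->engine[id];

	if (ce->state) {
		ggtt_unpin(ce->state);
		context_unreference(kctx);
	}
}

int main(void)
{
	struct gem_object rcs_state = { 0 };
	struct kernel_context kctx = { .refcount = 1, .ggtt_alignment = 4096 };

	kctx.engine[0].state = &rcs_state;

	legacy_engine_init(&kctx, 0);
	printf("after init:    pinned=%d refs=%d\n", rcs_state.pin_count, kctx.refcount);

	legacy_engine_fini(&kctx, 0);
	printf("after cleanup: pinned=%d refs=%d\n", rcs_state.pin_count, kctx.refcount);
	return 0;
}

Running the sketch just prints the pin count and reference count before and after cleanup, which is the invariant the patch relies on: the kernel context remains pinned, and referenced, for exactly as long as the legacy engine is initialised.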