Instead of allocating a new request when allocating a context, use the
request that initiated the allocation to emit the context
initialisation. This serves two purposes: first, it makes the
initialisation atomic with first use, simplifying scheduling and our
own error handling; secondly, it enables us to remove the explicit
context allocation required by higher levels of GEM and make that
property of execlists opaque (in the next patch). There is also a
minor step towards convergence of legacy/execlist contexts.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h  |  1 +
 drivers/gpu/drm/i915/intel_lrc.c | 47 +++++++++++++++++++++++-----------------
 2 files changed, 28 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 290320a82aca..5fa525f91a16 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -868,6 +868,7 @@ struct intel_context {
 		struct i915_vma *lrc_vma;
 		u64 lrc_desc;
 		uint32_t *lrc_reg_state;
+		bool initialised;
 	} engine[I915_NUM_ENGINES];
 
 	struct list_head link;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 93b2d1317433..af80a056dc2f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -672,9 +672,10 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
+	struct intel_engine_cs *engine = request->engine;
 	int ret;
 
-	request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
+	request->ringbuf = request->ctx->engine[engine->id].ringbuf;
 
 	/* Flush enough space to reduce the likelihood of waiting after
 	 * we start building the request - in which case we will just
@@ -697,7 +698,30 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 			return ret;
 	}
 
-	return intel_lr_context_pin(request->ctx, request->engine);
+	ret = intel_lr_context_pin(request->ctx, engine);
+	if (ret)
+		return ret;
+
+	if (!request->ctx->engine[engine->id].initialised) {
+		ret = engine->init_context(request);
+		if (ret)
+			goto err_unpin;
+
+		request->ctx->engine[engine->id].initialised = true;
+	}
+
+	/* Note that after this point, we have committed to using
+	 * this request as it is being used to both track the
+	 * state of engine initialisation and liveness of the
+	 * golden renderstate above. Think twice before you try
+	 * to cancel/unwind this request now.
+	 */
+
+	return 0;
+
+err_unpin:
+	intel_lr_context_unpin(request->ctx, engine);
+	return ret;
 }
 
 /*
@@ -2499,25 +2523,8 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 
 	ctx->engine[engine->id].ringbuf = ringbuf;
 	ctx->engine[engine->id].state = ctx_obj;
+	ctx->engine[engine->id].initialised = engine->init_context == NULL;
 
-	if (ctx != ctx->i915->kernel_context && engine->init_context) {
-		struct drm_i915_gem_request *req;
-
-		req = i915_gem_request_alloc(engine, ctx);
-		if (IS_ERR(req)) {
-			ret = PTR_ERR(req);
-			DRM_ERROR("ring create req: %d\n", ret);
-			goto error_ringbuf;
-		}
-
-		ret = engine->init_context(req);
-		i915_add_request_no_flush(req);
-		if (ret) {
-			DRM_ERROR("ring init context: %d\n",
-				  ret);
-			goto error_ringbuf;
-		}
-	}
 	return 0;
 
 error_ringbuf:
-- 
2.8.1
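
As an aside for anyone following the control flow from outside the
i915 tree: the shape of the change is the classic lazy first-use
initialisation pattern. Below is a minimal standalone C sketch of
that pattern as this patch applies it; every name in it is
illustrative only, not the actual i915 API.

/*
 * Sketch: the first request submitted against a context carries the
 * context initialisation, and a per-engine flag records that it has
 * been emitted, instead of allocating a dedicated init request at
 * context-allocation time.
 */
#include <stdbool.h>
#include <stdio.h>

struct context {
	bool initialised;	/* set once init has been emitted */
	int pin_count;
};

static int pin(struct context *ctx)
{
	ctx->pin_count++;
	return 0;
}

static void unpin(struct context *ctx)
{
	ctx->pin_count--;
}

/* Stand-in for engine->init_context(): emit golden state once. */
static int init_context(struct context *ctx)
{
	(void)ctx;	/* a real implementation writes into the request */
	printf("emitting context initialisation\n");
	return 0;
}

static int alloc_request_extras(struct context *ctx)
{
	int ret;

	ret = pin(ctx);
	if (ret)
		return ret;

	/* First use? Fold the initialisation into this request. */
	if (!ctx->initialised) {
		ret = init_context(ctx);
		if (ret)
			goto err_unpin;

		ctx->initialised = true;
	}

	/*
	 * Past this point the request is committed: it now tracks the
	 * context's initialisation, so it must not be unwound.
	 */
	return 0;

err_unpin:
	unpin(ctx);
	return ret;
}

int main(void)
{
	struct context ctx = { .initialised = false };

	alloc_request_extras(&ctx);	/* emits the initialisation */
	alloc_request_extras(&ctx);	/* no-op: already initialised */
	return 0;
}

The failure path mirrors the patch: if emitting the initialisation
fails the pin is unwound and the flag stays false, so a later request
retries it; once it succeeds, the request is committed, as the new
comment in intel_logical_ring_alloc_request_extras() warns.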