For flexible LRC context creation, we factor out the core logic of LRC context creation as __intel_lr_context_deferred_alloc(). For the hard-coded LRC context configurations, we keep them in the upper-level function intel_lr_context_deferred_alloc(). Signed-off-by: Zhi Wang <zhi.a.wang@xxxxxxxxx> --- drivers/gpu/drm/i915/intel_lrc.c | 46 ++++++++++++++++++++++++++++++---------- drivers/gpu/drm/i915/intel_lrc.h | 8 +++++++ 2 files changed, 43 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 3a03646..599687f 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -2525,22 +2525,19 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, } /** - * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context + * __intel_lr_context_deferred_alloc() - core logic of creating an LRC context * @ctx: LR context to create. - * @ring: engine to be used with the context. + * @params: parameters to specify configurable options of the context. * - * This function can be called more than once, with different engines, if we plan - * to use the context with them. The context backing objects and the ringbuffers - * (specially the ringbuffer backing objects) suck a lot of memory up, and that's why - * the creation is a deferred call: it's better to make sure first that we need to use - * a given ring with the context. + * This function is the core logic of creating and initializing an LRC context. * * Return: non-zero on error.
 */ -int intel_lr_context_deferred_alloc(struct intel_context *ctx, - struct intel_engine_cs *ring) +int __intel_lr_context_deferred_alloc(struct intel_context *ctx, + struct intel_lr_context_alloc_params *params) { + struct intel_engine_cs *ring = params->ring; struct drm_device *dev = ring->dev; struct drm_i915_gem_object *ctx_obj; uint32_t context_size; @@ -2561,7 +2558,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, return -ENOMEM; } - ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE); + ringbuf = intel_engine_create_ringbuffer(ring, params->ringbuffer_size); if (IS_ERR(ringbuf)) { ret = PTR_ERR(ringbuf); goto error_deref_obj; @@ -2576,7 +2573,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, ctx->engine[ring->id].ringbuf = ringbuf; ctx->engine[ring->id].state = ctx_obj; - if (ctx != ctx->i915->kernel_context && ring->init_context) { + if (params->ctx_needs_init && ring->init_context) { struct drm_i915_gem_request *req; req = i915_gem_request_alloc(ring, ctx); @@ -2606,6 +2603,33 @@ error_deref_obj: return ret; } +/** + * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context + * @ctx: LR context to create. + * @ring: engine to be used with the context. + * + * This function can be called more than once, with different engines, if we + * plan to use the context with them. The context backing objects and the + * ringbuffers (specially the ringbuffer backing objects) suck a lot of memory + * up, and that's why the creation is a deferred call: it's better to make sure + * first that we need to use a given ring with the context. + * + * Return: non-zero on error.
+ */ +int intel_lr_context_deferred_alloc(struct intel_context *ctx, + struct intel_engine_cs *ring) +{ + struct intel_lr_context_alloc_params params; + + memset(&params, 0, sizeof(params)); + + params.ring = ring; + params.ringbuffer_size = 4 * PAGE_SIZE; + params.ctx_needs_init = (ctx != ctx->i915->kernel_context); + + return __intel_lr_context_deferred_alloc(ctx, &params); +} + void intel_lr_context_reset(struct drm_device *dev, struct intel_context *ctx) { diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index e6cda3e..528c4fb 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -97,10 +97,18 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf, #define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) #define LRC_STATE_PN (LRC_PPHWSP_PN + 1) +struct intel_lr_context_alloc_params { + struct intel_engine_cs *ring; + u32 ringbuffer_size; + bool ctx_needs_init; +}; + void intel_lr_context_free(struct intel_context *ctx); uint32_t intel_lr_context_size(struct intel_engine_cs *ring); int intel_lr_context_deferred_alloc(struct intel_context *ctx, struct intel_engine_cs *ring); +int __intel_lr_context_deferred_alloc(struct intel_context *ctx, + struct intel_lr_context_alloc_params *params); void intel_lr_context_unpin(struct intel_context *ctx, struct intel_engine_cs *engine); void intel_lr_context_reset(struct drm_device *dev, -- 1.9.1 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx