Re: [PATCH 2/7] drm/i915: Wrap engine->context_pin() and engine->context_unpin()

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 




On 26/04/2018 18:49, Chris Wilson wrote:
Make life easier in upcoming patches by moving the context_pin and
context_unpin vfuncs into inline helpers.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
  drivers/gpu/drm/i915/gvt/mmio_context.c      |  2 +-
  drivers/gpu/drm/i915/gvt/scheduler.c         | 20 ++++++-------
  drivers/gpu/drm/i915/i915_debugfs.c          | 20 +++++++------
  drivers/gpu/drm/i915/i915_gem.c              |  4 +--
  drivers/gpu/drm/i915/i915_gem_context.c      |  8 +++---
  drivers/gpu/drm/i915/i915_gem_context.h      | 30 +++++++++++++++++++-
  drivers/gpu/drm/i915/i915_gpu_error.c        |  3 +-
  drivers/gpu/drm/i915/i915_perf.c             |  9 +++---
  drivers/gpu/drm/i915/i915_request.c          |  6 ++--
  drivers/gpu/drm/i915/intel_engine_cs.c       | 13 ++++-----
  drivers/gpu/drm/i915/intel_guc_ads.c         |  3 +-
  drivers/gpu/drm/i915/intel_guc_submission.c  |  5 ++--
  drivers/gpu/drm/i915/intel_lrc.c             | 29 +++++++++++--------
  drivers/gpu/drm/i915/intel_lrc.h             |  2 +-
  drivers/gpu/drm/i915/intel_ringbuffer.c      | 19 +++++++------
  drivers/gpu/drm/i915/selftests/mock_engine.c |  2 +-
  16 files changed, 108 insertions(+), 67 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index a5bac83d53a9..0f949554d118 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -448,7 +448,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
  {
-	u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
+	u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
  	u32 inhibit_mask =
  		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 35f7cfd7a6b4..ffb45a9ee228 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -58,7 +58,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
  	int ring_id = workload->ring_id;
  	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
  	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
  	struct execlist_ring_context *shadow_ring_context;
  	struct page *page;
@@ -130,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
  	int ring_id = workload->ring_id;
  	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
  	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
  	struct execlist_ring_context *shadow_ring_context;
  	struct page *page;
  	void *dst;
@@ -283,7 +283,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
  static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
  		struct intel_engine_cs *engine)
  {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
  	u64 desc = 0;
 
  	desc = ce->lrc_desc;
@@ -389,7 +389,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
  	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
  	 * the guest context, gvt can unpin the shadow_ctx safely.
  	 */
-	ring = engine->context_pin(engine, shadow_ctx);
+	ring = intel_context_pin(shadow_ctx, engine);
  	if (IS_ERR(ring)) {
  		ret = PTR_ERR(ring);
  		gvt_vgpu_err("fail to pin shadow context\n");
@@ -403,7 +403,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
  	return 0;
err_unpin:
-	engine->context_unpin(engine, shadow_ctx);
+	intel_context_unpin(shadow_ctx, engine);
  err_shadow:
  	release_shadow_wa_ctx(&workload->wa_ctx);
  err_scan:
@@ -437,7 +437,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
  	return 0;
err_unpin:
-	engine->context_unpin(engine, shadow_ctx);
+	intel_context_unpin(shadow_ctx, engine);
  	release_shadow_wa_ctx(&workload->wa_ctx);
  	return ret;
  }
@@ -526,7 +526,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
  	struct intel_vgpu_submission *s = &workload->vgpu->submission;
  	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
  	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
  	struct execlist_ring_context *shadow_ring_context;
  	struct page *page;
@@ -688,7 +688,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
  	ret = prepare_workload(workload);
  	if (ret) {
-		engine->context_unpin(engine, shadow_ctx);
+		intel_context_unpin(shadow_ctx, engine);
  		goto out;
  	}
@@ -771,7 +771,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
  	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
  	int ring_id = workload->ring_id;
  	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		shadow_ctx->__engine[ring_id].state->obj;
  	struct execlist_ring_context *shadow_ring_context;
  	struct page *page;
  	void *src;
@@ -898,7 +898,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
  		}
  		mutex_lock(&dev_priv->drm.struct_mutex);
  		/* unpin shadow ctx as the shadow_ctx update is done */
-		engine->context_unpin(engine, s->shadow_ctx);
+		intel_context_unpin(s->shadow_ctx, engine);
  		mutex_unlock(&dev_priv->drm.struct_mutex);
  	}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 83c86257fe1c..601041c6f9ae 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -377,16 +377,19 @@ static void print_batch_pool_stats(struct seq_file *m,
  	print_file_stats(m, "[k]batch pool", stats);
  }
-static int per_file_ctx_stats(int id, void *ptr, void *data)
+static int per_file_ctx_stats(int idx, void *ptr, void *data)
  {
  	struct i915_gem_context *ctx = ptr;
-	int n;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, ctx->i915, id) {
+		struct intel_context *ce = to_intel_context(ctx, engine);
-	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
-		if (ctx->engine[n].state)
-			per_file_stats(0, ctx->engine[n].state->obj, data);
-		if (ctx->engine[n].ring)
-			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
+		if (ce->state)
+			per_file_stats(0, ce->state->obj, data);
+		if (ce->ring)
+			per_file_stats(0, ce->ring->vma->obj, data);
  	}
return 0;
@@ -1959,7 +1962,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
  		seq_putc(m, '\n');
for_each_engine(engine, dev_priv, id) {
-			struct intel_context *ce = &ctx->engine[engine->id];
+			struct intel_context *ce =
+				to_intel_context(ctx, engine);
seq_printf(m, "%s: ", engine->name);
  			if (ce->state)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6b0c67a4f214..4090bfdda340 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3234,7 +3234,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
  				      stalled_mask & ENGINE_MASK(id));
  		ctx = fetch_and_zero(&engine->last_retired_context);
  		if (ctx)
-			engine->context_unpin(engine, ctx);
+			intel_context_unpin(ctx, engine);
/*
  		 * Ostensibily, we always want a context loaded for powersaving,
@@ -5291,7 +5291,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
  	for_each_engine(engine, i915, id) {
  		struct i915_vma *state;
-	state = ctx->engine[id].state;
+		state = to_intel_context(ctx, engine)->state;
  		if (!state)
  			continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 74435affe23f..59d4bd4a7b73 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -117,15 +117,15 @@ static void lut_close(struct i915_gem_context *ctx)
static void i915_gem_context_free(struct i915_gem_context *ctx)
  {
-	int i;
+	unsigned int n;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
  	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
  	i915_ppgtt_put(ctx->ppgtt);
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		struct intel_context *ce = &ctx->engine[i];
+	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+		struct intel_context *ce = &ctx->__engine[n];
if (!ce->state)
  			continue;
@@ -521,7 +521,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
  		if (!engine->last_retired_context)
  			continue;
-	engine->context_unpin(engine, engine->last_retired_context);
+		intel_context_unpin(engine->last_retired_context, engine);
  		engine->last_retired_context = NULL;
  	}
  }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index b12a8a8c5af9..ace3b129c189 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -149,7 +149,7 @@ struct i915_gem_context {
  		u32 *lrc_reg_state;
  		u64 lrc_desc;
  		int pin_count;
-	} engine[I915_NUM_ENGINES];
+	} __engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
  	u32 ring_size;
@@ -256,6 +256,34 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
  	return !ctx->file_priv;
  }
+static inline struct intel_context *
+to_intel_context(struct i915_gem_context *ctx,
+		 const struct intel_engine_cs *engine)
+{
+	return &ctx->__engine[engine->id];
+}
+
+static inline struct intel_ring *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+	return engine->context_pin(engine, ctx);
+}
+
+static inline void __intel_context_pin(struct i915_gem_context *ctx,
+				       const struct intel_engine_cs *engine)
+{
+	struct intel_context *ce = to_intel_context(ctx, engine);
+
+	GEM_BUG_ON(!ce->pin_count);
+	ce->pin_count++;
+}

Unused in this patch, but it also breaks layering to avoid a function call. Slight grumble but OK, not the worst thing in our codebase. I don't have any ideas on how to have it all.

+
+static inline void intel_context_unpin(struct i915_gem_context *ctx,
+				       struct intel_engine_cs *engine)
+{
+	engine->context_unpin(engine, ctx);
+}
+
  /* i915_gem_context.c */
  int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
  void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 671ffa37614e..c0127965b578 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1472,7 +1472,8 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->ctx =
  				i915_error_object_create(i915,
-							 request->ctx->engine[i].state);
+							 to_intel_context(request->ctx,
+									  engine)->state);
error->simulated |=
  				i915_gem_context_no_error_capture(request->ctx);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index bfc906cd4e5e..4b1da01168ae 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1234,7 +1234,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
  		 *
  		 * NB: implied RCS engine...
  		 */
-		ring = engine->context_pin(engine, stream->ctx);
+		ring = intel_context_pin(stream->ctx, engine);
  		mutex_unlock(&dev_priv->drm.struct_mutex);
  		if (IS_ERR(ring))
  			return PTR_ERR(ring);
@@ -1246,7 +1246,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
  		 * with gen8+ and execlists
  		 */
  		dev_priv->perf.oa.specific_ctx_id =
-			i915_ggtt_offset(stream->ctx->engine[engine->id].state);
+			i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
  	}
return 0;
@@ -1271,7 +1271,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
  		mutex_lock(&dev_priv->drm.struct_mutex);
dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-		engine->context_unpin(engine, stream->ctx);
+		intel_context_unpin(stream->ctx, engine);
mutex_unlock(&dev_priv->drm.struct_mutex);
  	}
@@ -1759,6 +1759,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
  static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
  				       const struct i915_oa_config *oa_config)
  {
+	struct intel_engine_cs *engine = dev_priv->engine[RCS];
  	struct i915_gem_context *ctx;
  	int ret;
  	unsigned int wait_flags = I915_WAIT_LOCKED;
@@ -1789,7 +1790,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
/* Update all contexts now that we've stalled the submission. */
  	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-		struct intel_context *ce = &ctx->engine[RCS];
+		struct intel_context *ce = to_intel_context(ctx, engine);
  		u32 *regs;
/* OA settings will be set upon first use */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b1993d4a1a53..9358f2cf0c32 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -409,7 +409,7 @@ static void i915_request_retire(struct i915_request *request)
  	 * the subsequent request.
  	 */
  	if (engine->last_retired_context)
-		engine->context_unpin(engine, engine->last_retired_context);
+		intel_context_unpin(engine->last_retired_context, engine);
  	engine->last_retired_context = request->ctx;
spin_lock_irq(&request->lock);
@@ -638,7 +638,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
  	 * GGTT space, so do this first before we reserve a seqno for
  	 * ourselves.
  	 */
-	ring = engine->context_pin(engine, ctx);
+	ring = intel_context_pin(ctx, engine);
  	if (IS_ERR(ring))
  		return ERR_CAST(ring);
  	GEM_BUG_ON(!ring);
@@ -787,7 +787,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
  err_unreserve:
  	unreserve_gt(i915);
  err_unpin:
-	engine->context_unpin(engine, ctx);
+	intel_context_unpin(ctx, engine);
  	return ERR_PTR(ret);
  }
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index eba81d55dc3a..238c8d3da041 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -685,7 +685,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
  	 * be available. To avoid this we always pin the default
  	 * context.
  	 */
-	ring = engine->context_pin(engine, engine->i915->kernel_context);
+	ring = intel_context_pin(engine->i915->kernel_context, engine);
  	if (IS_ERR(ring))
  		return PTR_ERR(ring);
@@ -694,8 +694,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
  	 * we can interrupt the engine at any time.
  	 */
  	if (engine->i915->preempt_context) {
-		ring = engine->context_pin(engine,
-					   engine->i915->preempt_context);
+		ring = intel_context_pin(engine->i915->preempt_context, engine);
  		if (IS_ERR(ring)) {
  			ret = PTR_ERR(ring);
  			goto err_unpin_kernel;
@@ -719,9 +718,9 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
  	intel_engine_fini_breadcrumbs(engine);
  err_unpin_preempt:
  	if (engine->i915->preempt_context)
-		engine->context_unpin(engine, engine->i915->preempt_context);
+		intel_context_unpin(engine->i915->preempt_context, engine);
  err_unpin_kernel:
-	engine->context_unpin(engine, engine->i915->kernel_context);
+	intel_context_unpin(engine->i915->kernel_context, engine);
  	return ret;
  }
@@ -749,8 +748,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
  		i915_gem_object_put(engine->default_state);
if (engine->i915->preempt_context)
-		engine->context_unpin(engine, engine->i915->preempt_context);
-	engine->context_unpin(engine, engine->i915->kernel_context);
+		intel_context_unpin(engine->i915->preempt_context, engine);
+	intel_context_unpin(engine->i915->kernel_context, engine);
  }
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index 334cb5202e1c..dcaa3fb71765 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -121,7 +121,8 @@ int intel_guc_ads_create(struct intel_guc *guc)
  	 * to find it. Note that we have to skip our header (1 page),
  	 * because our GuC shared data is there.
  	 */
-	kernel_ctx_vma = dev_priv->kernel_context->engine[RCS].state;
+	kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
+					  dev_priv->engine[RCS])->state;
  	blob->ads.golden_context_lrca =
  		intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 02da05875aa7..6e6ed0f46bd3 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -362,7 +362,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
  	desc->db_id = client->doorbell_id;
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
-		struct intel_context *ce = &ctx->engine[engine->id];
+		struct intel_context *ce = to_intel_context(ctx, engine);
  		u32 guc_engine_id = engine->guc_id;
  		struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
@@ -990,7 +990,8 @@ static void guc_fill_preempt_context(struct intel_guc *guc)
  	enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
-		struct intel_context *ce = &client->owner->engine[id];
+		struct intel_context *ce =
+			to_intel_context(client->owner, engine);
  		u32 addr = intel_hws_preempt_done_address(engine);
  		u32 *cs;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ce23d5116482..79af778621a1 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -223,7 +223,7 @@ static void
  intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
  				   struct intel_engine_cs *engine)
  {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
  	u64 desc;
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -414,7 +414,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static u64 execlists_update_context(struct i915_request *rq)
  {
-	struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
+	struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
  	struct i915_hw_ppgtt *ppgtt =
  		rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
  	u32 *reg_state = ce->lrc_reg_state;
@@ -523,7 +523,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
  {
  	struct intel_engine_execlists *execlists = &engine->execlists;
  	struct intel_context *ce =
-		&engine->i915->preempt_context->engine[engine->id];
+		to_intel_context(engine->i915->preempt_context, engine);
  	unsigned int n;
GEM_BUG_ON(execlists->preempt_complete_status !=
@@ -1328,7 +1328,7 @@ static struct intel_ring *
  execlists_context_pin(struct intel_engine_cs *engine,
  		      struct i915_gem_context *ctx)
  {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
  	void *vaddr;
  	int ret;
@@ -1381,7 +1381,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
  static void execlists_context_unpin(struct intel_engine_cs *engine,
  				    struct i915_gem_context *ctx)
  {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
  	GEM_BUG_ON(ce->pin_count == 0);
@@ -1400,8 +1400,8 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
static int execlists_request_alloc(struct i915_request *request)
  {
-	struct intel_engine_cs *engine = request->engine;
-	struct intel_context *ce = &request->ctx->engine[engine->id];
+	struct intel_context *ce =
+		to_intel_context(request->ctx, request->engine);
  	int ret;
GEM_BUG_ON(!ce->pin_count);
@@ -1855,7 +1855,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
  	 * future request will be after userspace has had the opportunity
  	 * to recreate its own state.
  	 */
-	ce = &request->ctx->engine[engine->id];
+	ce = to_intel_context(request->ctx, engine);
  	execlists_init_reg_state(ce->lrc_reg_state,
  				 request->ctx, engine, ce->ring);
@@ -2296,9 +2296,13 @@ static int logical_ring_init(struct intel_engine_cs *engine)
  	}
engine->execlists.preempt_complete_status = ~0u;
-	if (engine->i915->preempt_context)
+	if (engine->i915->preempt_context) {
+		struct intel_context *ce =
+			to_intel_context(engine->i915->preempt_context, engine);
+
  		engine->execlists.preempt_complete_status =
-			upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);
+			upper_32_bits(ce->lrc_desc);
+	}
  	return 0;
 
@@ -2580,7 +2584,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
  					    struct intel_engine_cs *engine)
  {
  	struct drm_i915_gem_object *ctx_obj;
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
  	struct i915_vma *vma;
  	uint32_t context_size;
  	struct intel_ring *ring;
@@ -2651,7 +2655,8 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
  	 */
  	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
  		for_each_engine(engine, dev_priv, id) {
-			struct intel_context *ce = &ctx->engine[engine->id];
+			struct intel_context *ce =
+				to_intel_context(ctx, engine);
  			u32 *reg;
if (!ce->state)
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 59d7b86012e9..4ec7d8dd13c8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -108,7 +108,7 @@ static inline uint64_t
  intel_lr_context_descriptor(struct i915_gem_context *ctx,
  			    struct intel_engine_cs *engine)
  {
-	return ctx->engine[engine->id].lrc_desc;
+	return to_intel_context(ctx, engine)->lrc_desc;
  }
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c06c22c953b3..69ffc0dfe92b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -558,7 +558,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
  	 */
  	if (request) {
  		struct drm_i915_private *dev_priv = request->i915;
-		struct intel_context *ce = &request->ctx->engine[engine->id];
+		struct intel_context *ce = to_intel_context(request->ctx,
+							    engine);
  		struct i915_hw_ppgtt *ppgtt;
if (ce->state) {
@@ -1163,9 +1164,9 @@ intel_ring_free(struct intel_ring *ring)
  	kfree(ring);
  }
-static int context_pin(struct i915_gem_context *ctx)
+static int context_pin(struct intel_context *ce)
  {
-	struct i915_vma *vma = ctx->engine[RCS].state;
+	struct i915_vma *vma = ce->state;
  	int ret;
/*
@@ -1256,7 +1257,7 @@ static struct intel_ring *
  intel_ring_context_pin(struct intel_engine_cs *engine,
  		       struct i915_gem_context *ctx)
  {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
  	int ret;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
@@ -1278,7 +1279,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
  	}
if (ce->state) {
-		ret = context_pin(ctx);
+		ret = context_pin(ce);
  		if (ret)
  			goto err;
@@ -1299,7 +1300,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
  static void intel_ring_context_unpin(struct intel_engine_cs *engine,
  				     struct i915_gem_context *ctx)
  {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
  	GEM_BUG_ON(ce->pin_count == 0);
@@ -1427,7 +1428,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
  	*cs++ = MI_SET_CONTEXT;
-	*cs++ = i915_ggtt_offset(rq->ctx->engine[RCS].state) | flags;
+	*cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
  	/*
  	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
  	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1518,7 +1519,7 @@ static int switch_context(struct i915_request *rq)
  		hw_flags = MI_FORCE_RESTORE;
  	}
-	if (to_ctx->engine[engine->id].state &&
+	if (to_intel_context(to_ctx, engine)->state &&
  	    (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
  		GEM_BUG_ON(engine->id != RCS);
@@ -1566,7 +1567,7 @@ static int ring_request_alloc(struct i915_request *request)
  {
  	int ret;
-	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
+	GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
/* Flush enough space to reduce the likelihood of waiting after
  	 * we start building the request - in which case we will just
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 78a89efa1119..3ed0557316d4 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -217,7 +217,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
  	GEM_BUG_ON(timer_pending(&mock->hw_delay));
if (engine->last_retired_context)
-		engine->context_unpin(engine, engine->last_retired_context);
+		intel_context_unpin(engine->last_retired_context, engine);
intel_engine_fini_breadcrumbs(engine);

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx




[Index of Archives]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]
  Powered by Linux