[PATCH] drm/i915: Remove unused workaround bb code and unused struct members

From: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>

Now that only a single workaround batch buffer (indirect_ctx) gets emitted,
this part of the code looks over-engineered and can be simplified, together
with the corresponding struct members.

Also correct the comment describing the unit of the workaround bb size,
which is stored in bytes and not dwords.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
---
 drivers/gpu/drm/i915/intel_lrc.c        | 57 +++++++++------------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  9 ++----
 2 files changed, 18 insertions(+), 48 deletions(-)
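
The simplified flow in intel_init_workaround_bb() now boils down to running
the single indirect_ctx emitter and taking size = end - start. As an
illustration only, here is a minimal standalone userspace model of that
computation (not kernel code; fake_init_indirectctx_bb, the buffer size and
the command dword are made-up stand-ins):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES		64
#define CTX_WA_BB_OBJ_SIZE	4096

typedef uint32_t *(*wa_bb_func_t)(uint32_t *batch);

/*
 * Stand-in emitter: writes one command dword and pads to a cacheline,
 * mirroring how the real gen8/gen9 emitters return the advanced batch
 * pointer.
 */
static uint32_t *fake_init_indirectctx_bb(uint32_t *batch)
{
	unsigned int i;

	*batch++ = 0x18800001;	/* arbitrary command dword */
	for (i = 1; i < CACHELINE_BYTES / sizeof(uint32_t); i++)
		*batch++ = 0;	/* MI_NOOP-style padding */

	return batch;
}

int main(void)
{
	uint32_t buf[CTX_WA_BB_OBJ_SIZE / sizeof(uint32_t)];
	uint32_t *batch = buf;
	wa_bb_func_t wa_bb_fn = fake_init_indirectctx_bb;
	uint32_t size;

	/* The former two-entry loop collapses to a single subtraction. */
	size = (wa_bb_fn(batch) - batch) * sizeof(uint32_t);

	/* The same invariants the patch now checks with GEM_BUG_ON(). */
	assert(size <= CTX_WA_BB_OBJ_SIZE);
	assert(size % CACHELINE_BYTES == 0);

	printf("wa bb size: %u bytes (%u cachelines)\n",
	       size, size / CACHELINE_BYTES);

	return 0;
}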

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ff25f209d0a5..3ab1021d8518 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1260,10 +1260,10 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 }
 
 /*
- * Typically we only have one indirect_ctx and per_ctx batch buffer which are
- * initialized at the beginning and shared across all contexts but this field
- * helps us to have multiple batches at different offsets and select them based
- * on a criteria. At the moment this batch always start at the beginning of the page
+ * Typically we only have one indirect_ctx batch buffer which is initialized
+ * at the beginning and shared across all contexts but this field helps us
+ * to have multiple batches at different offsets and select them based on some
+ * criteria. At the moment this batch always starts at the beginning of the page
  * and at this point we don't have multiple wa_ctx batch buffers.
  *
  * The number of WA applied are not known at the beginning; we use this field
@@ -1406,12 +1406,10 @@ typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 {
 	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
-	struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
-					    &wa_ctx->per_ctx };
-	wa_bb_func_t wa_bb_fn[2];
+	struct i915_wa_ctx_bb *wa_bb = &wa_ctx->indirect_ctx;
+	wa_bb_func_t wa_bb_fn;
 	struct page *page;
-	void *batch, *batch_ptr;
-	unsigned int i;
+	void *batch;
 	int ret;
 
 	if (WARN_ON(engine->id != RCS || !engine->scratch))
@@ -1421,12 +1419,10 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	case 10:
 		return 0;
 	case 9:
-		wa_bb_fn[0] = gen9_init_indirectctx_bb;
-		wa_bb_fn[1] = NULL;
+		wa_bb_fn = gen9_init_indirectctx_bb;
 		break;
 	case 8:
-		wa_bb_fn[0] = gen8_init_indirectctx_bb;
-		wa_bb_fn[1] = NULL;
+		wa_bb_fn = gen8_init_indirectctx_bb;
 		break;
 	default:
 		MISSING_CASE(INTEL_GEN(engine->i915));
@@ -1440,31 +1436,16 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	}
 
 	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
-	batch = batch_ptr = kmap_atomic(page);
+	batch = kmap_atomic(page);
 
-	/*
-	 * Emit the two workaround batch buffers, recording the offset from the
-	 * start of the workaround batch buffer object for each and their
-	 * respective sizes.
-	 */
-	for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
-		wa_bb[i]->offset = batch_ptr - batch;
-		if (WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, CACHELINE_BYTES))) {
-			ret = -EINVAL;
-			break;
-		}
-		if (wa_bb_fn[i])
-			batch_ptr = wa_bb_fn[i](engine, batch_ptr);
-		wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
-	}
-
-	BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
+	/* Emit the workaround batch buffer, recording its size. */
+	wa_bb->size = (void *)wa_bb_fn(engine, batch) - batch;
+	GEM_BUG_ON(wa_bb->size > CTX_WA_BB_OBJ_SIZE ||
+		   !IS_ALIGNED(wa_bb->size, CACHELINE_BYTES));
 
 	kunmap_atomic(batch);
-	if (ret)
-		lrc_destroy_wa_ctx(engine);
 
-	return ret;
+	return 0;
 }
 
 static u8 gtiir[] = {
@@ -2184,7 +2165,7 @@ static void execlists_init_reg_state(u32 *regs,
 			u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
 
 			regs[CTX_RCS_INDIRECT_CTX + 1] =
-				(ggtt_offset + wa_ctx->indirect_ctx.offset) |
+				ggtt_offset |
 				(wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
 
 			regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
@@ -2192,12 +2173,6 @@ static void execlists_init_reg_state(u32 *regs,
 		}
 
 		CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
-		if (wa_ctx->per_ctx.size) {
-			u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
-			regs[CTX_BB_PER_CTX_PTR + 1] =
-				(ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
-		}
 	}
 
 	regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c5ff203e42d6..6aa184f6ad2b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -142,17 +142,12 @@ struct drm_i915_reg_table;
  * values are referred in terms of dwords
  *
  * struct i915_wa_ctx_bb:
- *  offset: specifies batch starting position, also helpful in case
- *    if we want to have multiple batches at different offsets based on
- *    some criteria. It is not a requirement at the moment but provides
- *    an option for future use.
- *  size: size of the batch in DWORDS
+ *  size: size of the batch in bytes
  */
 struct i915_ctx_workarounds {
 	struct i915_wa_ctx_bb {
-		u32 offset;
 		u32 size;
-	} indirect_ctx, per_ctx;
+	} indirect_ctx;
 	struct i915_vma *vma;
 };
 
-- 
2.14.1
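
A note on the register encoding touched in execlists_init_reg_state(): the
INDIRECT_CTX dword packs a cacheline-aligned GGTT address in its high bits
with the batch size in cachelines in the low bits, which is why the patch
can drop the "+ indirect_ctx.offset" term once the batch always starts at
offset 0 of the page, and why the GEM_BUG_ON() above insists the size stays
cacheline aligned. A small standalone sketch of that packing (values are
hypothetical, field layout as implied by the patch; the hardware
documentation remains authoritative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64

/* Pack address and size the way the context image expects. */
static uint32_t pack_indirect_ctx(uint32_t ggtt_offset, uint32_t size)
{
	assert(ggtt_offset % CACHELINE_BYTES == 0);	/* low bits must be clear */
	assert(size % CACHELINE_BYTES == 0);

	return ggtt_offset | (size / CACHELINE_BYTES);
}

int main(void)
{
	/* Hypothetical page-aligned GGTT offset, three cachelines of wa bb. */
	uint32_t reg = pack_indirect_ctx(0x00345000, 3 * CACHELINE_BYTES);

	printf("reg=0x%08x addr=0x%08x size=%u cachelines\n",
	       reg,
	       reg & ~(uint32_t)(CACHELINE_BYTES - 1),
	       reg & (CACHELINE_BYTES - 1));

	return 0;
}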
